Back to Community
10 million with 2.8 leverage (2 sharpe algo)

I tried to test this algorithm on 10M to comply with the new contest rules and it performed decently.

Clone Algorithm
72
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
import math
import numpy as np
from sklearn.decomposition import FastICA
import statsmodels.api as smapi
from cvxopt import matrix, solvers
from cvxopt.solvers import lp

def initialize(context):
    """Set up algorithm state and the weekly rebalance schedule."""
    # Silence cvxopt's solver progress output.
    solvers.options['show_progress'] = False
    # Universe selection happens once in before_trading_start.
    context.done = False
    context.univ = None
    # Rebalance weekly, 30 minutes after the Monday open.
    schedule_function(trade, date_rules.week_start(), time_rules.market_open(minutes=30))

def handle_data(context, data):
    """No per-bar logic; all trading happens in the scheduled `trade`."""

def trade(context, data):
    """Weekly rebalance: extract 10 ICA components from 90 days of log
    returns, regress each stock on them, and solve an LP (getweights)
    for factor-hedged weights, targeting 2.8x gross leverage.
    """
    record(l=context.account.leverage)  # chart current account leverage
    prices = data.history(context.univ, "price", 90, "1d")
    prices = prices.dropna(axis=1)  # drop stocks with any missing prices
   
    # Daily log returns: log1p of simple returns.
    returns = prices.pct_change().dropna().values
    returns = np.log1p(returns)
    # 10 independent components as candidate common return drivers.
    sources = FastICA(10, random_state=1).fit_transform(returns)
    s = smapi.add_constant(sources)  # prepend intercept column for OLS

    # betas[i, :] = (intercept, factor loadings) of stock i on the sources.
    betas = np.zeros((np.shape(returns)[1], np.shape(s)[1]))

    for i in range(0, np.shape(returns)[1]):
        model = smapi.OLS(returns[:, i], s).fit()
        betas[i, :] = model.params
        
   
    W = getweights(betas)  # LP solution; may index like a vector (cvxopt matrix)
    
    # Normalize by gross exposure so the 2.8 multiplier sets gross leverage.
    den = np.sum(np.abs(W))
    if den == 0:
        den = 1  # avoid division by zero when all weights are zero
    # Iterating a DataFrame yields its column labels (the sids).
    for i, sid in enumerate(prices):
        order_target_value(sid, W[i] / den * context.portfolio.portfolio_value * 2.8)
     

def getweights(params):
    """Solve an LP for portfolio weights from per-stock regression params.

    params : (m, n) array; column 0 holds OLS intercepts (expected-return
             proxies), columns 1..n-1 hold ICA factor loadings.
    Returns the cvxopt solution vector (may be None if the solver fails —
    callers do not currently guard against that).
    """
    (m,n) = np.shape(params)
    # Objective: minimize -intercept'x, i.e. maximize predicted return.
    c = -params[:, 0]
    c = matrix(c)
    # Equality constraint params.T x = 0: zero net exposure to each column.
    # NOTE(review): this includes the intercept column, which also forces
    # the objective to zero on the feasible set — possibly params[:, 1:].T
    # was intended; confirm against the author's intent.
    A = matrix(params.T)
    b = matrix(0.0, (n, 1))
    # Inequality G x <= h caps each weight at 10000; there is no lower
    # bound, so weights may be arbitrarily negative (short positions).
    G = np.eye(m)
    h = np.ones((m, 1)) * 10000
    res = lp(c=c, G=matrix(G), h=matrix(h, (m, 1)), A=A, b=b)    
    return res['x']



def before_trading_start(context, data):
    """Select the universe once (first trading day) and keep it fixed.

    Top 500 primary-share common stocks by market cap (> $5B), excluding
    OTC/pink sheets, when-issued symbols, and depositary receipts.
    """
    if context.done:
        return  # universe already chosen; it is never refreshed
    context.done = True
    fundamental_df = get_fundamentals(
        query(fundamentals.share_class_reference.symbol)
        .filter(fundamentals.valuation.market_cap != None)
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCPK") # no pink sheets
        .filter(fundamentals.share_class_reference.security_type == 'ST00000001') # common stock only
        .filter(~fundamentals.share_class_reference.symbol.contains('_WI')) # drop when-issued
        .filter(fundamentals.share_class_reference.is_primary_share == True) # remove ancillary classes
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False) # !ADR/GDR
        .filter(fundamentals.valuation.market_cap > 5e9)
        .order_by(fundamentals.valuation.market_cap.desc()).limit(500)).T
    # The transposed frame has securities as its row index; [0:500] is
    # redundant with .limit(500) above but harmless.
    context.univ = fundamental_df[0:500].index
  
    
There was a runtime error.
20 responses

Hi Pravin!
Was wondering, is there any way to run pyfolio against the above algorithm,
and/or to see the weaknesses and strengths of the above algorithm.
To gain a further understanding and perform more analysis on ways to improve it.
many thanks,
Best,
Andrew

Nice algo!

Just to be clear, this solving for portfolios hedged against the top-10 ICA-discovered factors, right?

@Simon. yes.

Here's a longer backtest. Seems like it doesn't do so well under all market conditions?

Clone Algorithm
4
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
import math
import numpy as np
from sklearn.decomposition import FastICA
import statsmodels.api as smapi
from cvxopt import matrix, solvers
from cvxopt.solvers import lp

def initialize(context):
    """Set up algorithm state and the weekly rebalance schedule."""
    # Silence cvxopt's solver progress output.
    solvers.options['show_progress'] = False
    # Universe selection happens once in before_trading_start.
    context.done = False
    context.univ = None
    # Rebalance weekly, 30 minutes after the Monday open.
    schedule_function(trade, date_rules.week_start(), time_rules.market_open(minutes=30))

def handle_data(context, data):
    """No per-bar logic; all trading happens in the scheduled `trade`."""

def trade(context, data):
    """Weekly rebalance: extract 10 ICA components from 90 days of log
    returns, regress each stock on them, and solve an LP (getweights)
    for factor-hedged weights, targeting 2.8x gross leverage.
    """
    record(l=context.account.leverage)  # chart current account leverage
    prices = data.history(context.univ, "price", 90, "1d")
    prices = prices.dropna(axis=1)  # drop stocks with any missing prices
   
    # Daily log returns: log1p of simple returns.
    returns = prices.pct_change().dropna().values
    returns = np.log1p(returns)
    # 10 independent components as candidate common return drivers.
    sources = FastICA(10, random_state=1).fit_transform(returns)
    s = smapi.add_constant(sources)  # prepend intercept column for OLS

    # betas[i, :] = (intercept, factor loadings) of stock i on the sources.
    betas = np.zeros((np.shape(returns)[1], np.shape(s)[1]))

    for i in range(0, np.shape(returns)[1]):
        model = smapi.OLS(returns[:, i], s).fit()
        betas[i, :] = model.params
        
   
    W = getweights(betas)  # LP solution; may index like a vector (cvxopt matrix)
    
    # Normalize by gross exposure so the 2.8 multiplier sets gross leverage.
    den = np.sum(np.abs(W))
    if den == 0:
        den = 1  # avoid division by zero when all weights are zero
    # Iterating a DataFrame yields its column labels (the sids).
    for i, sid in enumerate(prices):
        order_target_value(sid, W[i] / den * context.portfolio.portfolio_value * 2.8)
     

def getweights(params):
    """Solve an LP for portfolio weights from per-stock regression params.

    params : (m, n) array; column 0 holds OLS intercepts (expected-return
             proxies), columns 1..n-1 hold ICA factor loadings.
    Returns the cvxopt solution vector (may be None if the solver fails —
    callers do not currently guard against that).
    """
    (m,n) = np.shape(params)
    # Objective: minimize -intercept'x, i.e. maximize predicted return.
    c = -params[:, 0]
    c = matrix(c)
    # Equality constraint params.T x = 0: zero net exposure to each column.
    # NOTE(review): this includes the intercept column, which also forces
    # the objective to zero on the feasible set — possibly params[:, 1:].T
    # was intended; confirm against the author's intent.
    A = matrix(params.T)
    b = matrix(0.0, (n, 1))
    # Inequality G x <= h caps each weight at 10000; there is no lower
    # bound, so weights may be arbitrarily negative (short positions).
    G = np.eye(m)
    h = np.ones((m, 1)) * 10000
    res = lp(c=c, G=matrix(G), h=matrix(h, (m, 1)), A=A, b=b)    
    return res['x']



def before_trading_start(context, data):
    """Select the universe once (first trading day) and keep it fixed.

    Top 500 primary-share common stocks by market cap (> $5B), excluding
    OTC/pink sheets, when-issued symbols, and depositary receipts.
    """
    if context.done:
        return  # universe already chosen; it is never refreshed
    context.done = True
    fundamental_df = get_fundamentals(
        query(fundamentals.share_class_reference.symbol)
        .filter(fundamentals.valuation.market_cap != None)
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCPK") # no pink sheets
        .filter(fundamentals.share_class_reference.security_type == 'ST00000001') # common stock only
        .filter(~fundamentals.share_class_reference.symbol.contains('_WI')) # drop when-issued
        .filter(fundamentals.share_class_reference.is_primary_share == True) # remove ancillary classes
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False) # !ADR/GDR
        .filter(fundamentals.valuation.market_cap > 5e9)
        .order_by(fundamentals.valuation.market_cap.desc()).limit(500)).T
    # The transposed frame has securities as its row index; [0:500] is
    # redundant with .limit(500) above but harmless.
    context.univ = fundamental_df[0:500].index
  
    
There was a runtime error.

Here is long backtest of simple algo(50-50-252 SHY,IEF,SPY), Anthony FJ Garner idea, with the same leverage to compare.

Clone Algorithm
74
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
# 50-50-252  SHY, IEF, SPY 

def initialize(context):
    """Configure the three-ETF universe and schedule monthly trading."""
    # Resolve tickers as of this date (must precede the symbols() call).
    set_symbol_lookup_date('2015-01-01')
    context.assets = symbols('SHY', 'IEF', 'SPY')
    # Assume zero spread so fills occur at the quoted price.
    set_slippage(slippage.FixedSlippage(spread=0.00))
    # Rebalance on the first trading day of each month at the open.
    schedule_function(trade, date_rules.month_start(), time_rules.market_open())
    
def trade(context, data):
    """Monthly momentum rotation: hold the top-n of SHY/IEF/SPY by
    trailing 12-month return, equally weighted at `lev` gross leverage.
    """
    n = 2  # number of assets to hold
    period = 252  # lookback in trading days (~12 months)
    lev = 2.8  # target gross leverage
    
    prices = data.history(context.assets, 'price', period + 1, '1d')    
    C = prices                               
    R = C.iloc[-1] / C.iloc[0] -1.  # Returns over the full lookback window
    R = R.dropna()
    R.sort(ascending = False)  # in-place descending sort (old pandas Series API)
    picks = R.head(n)   #  .head .tail  (top-n winners; .tail would pick losers)
    # Equal weight across picks; zero if nothing qualified.
    wt = lev / len(picks) if len(picks) !=0 else 0
    
    for asset in context.assets:
        if asset in picks.index and data.can_trade(asset):             
            order_target_percent(asset, wt)
        else:
            order_target(asset, 0)  # exit anything not in the top n
    record(Leverage = context.account.leverage) 
    
'''


START
01/02/2004
END 
07/15/2016 


'''
There was a runtime error.

Tell you one thing, chaps: you aren't going to be able to borrow cheaper than the US Treasury (SHY, IEF)! You could of course look at the futures markets and see how an implementation works out there, but this is assuredly not going to work while paying brokers' margin rates.

Another no-brainer algo — 3 assets, rebalanced monthly, equally weighted, validated by backtesting back to 1870 — has comparable or better results with leverage of only 1.8.
Why do we need that long-short nano technology and pay additional margin rate?

Clone Algorithm
12
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
# 3 assets rebalanced monthly  equally weighted 

def initialize(context):
    """Define the three-asset universe and schedule the monthly rebalance."""
    context.assets = symbols('IEF', 'TLT', 'SPY')
    # Rebalance on the last trading day of the month, 45 min after the open.
    schedule_function(trade, date_rules.month_end(), time_rules.market_open(minutes=45))
    
def trade(context, data):
    """Rebalance every asset to equal weight at 1.8x gross leverage."""
    lev = 1.8
    n_assets = len(context.assets)
    # Guard against an empty asset list.
    wt = 0 if n_assets == 0 else lev / n_assets

    for asset in context.assets:
        order_target_percent(asset, wt)

    record(Leverage=context.account.leverage)
    
'''
START
08/01/2003
END 
07/15/2016

'''
There was a runtime error.

I think the idea of using the first N components to hedge against is pretty neat, and will definitely cover the majority of the variance in the training set. Though as Grant pointed out and this was confirmed by my own playing with the algo, it looks like it fails out of sample, especially in times of crisis. The main issue I see is that this algo is making an implicit prediction that future variance in the selected equities will be the same or similar to the training set, that the selected components we find and hedge against will still represent significant exposure in the future.

Unfortunately this is further compounded by the fact we have also defined explicit parameters (e.g. 90 days in the original post) so we are making an even more specific prediction: the ICA risk exposures of the past 90 days will be the same over the next N days. I tried to fix this issue by doing a random sampling (n=100) of potentially overlapping time frames over the past year and then taking the mean weight returned by the function. It seems a little better in regards to the quality of the weights, turnover is significantly lower and the algo tends to move in and out positions in a much smoother fashion.

Clone Algorithm
23
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
import math
import numpy as np
import pandas as pd
from sklearn.decomposition import FastICA
import statsmodels.api as smapi
from cvxopt import matrix, solvers
from cvxopt.solvers import lp

from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.classifiers.morningstar import Sector
from quantopian.pipeline.factors import CustomFactor, AverageDollarVolume
from quantopian.pipeline.data import morningstar as mstar
from quantopian.pipeline.filters.morningstar import IsPrimaryShare

def make_pipeline():
    """Build a pipeline of the 100 largest stocks passing the
    tradability filters, exposing market cap as a column.
    """
    latest_cap = mstar.valuation.market_cap.latest
    pipe = Pipeline()
    pipe.add(latest_cap, "market_cap")
    # Screen down to the 100 largest names among those passing the filters.
    pipe.set_screen(latest_cap.top(100, mask=universe_filters()))
    return pipe
    

def initialize(context):
    """Wire up state, the pipeline, and the scheduled functions."""
    # Silence cvxopt's solver progress output.
    solvers.options['show_progress'] = False
    context.done = False
    context.univ = None
    attach_pipeline(make_pipeline(), 'top_500')
    # Record leverage at every close; rebalance weekly after the open.
    schedule_function(plot, date_rules.every_day(), time_rules.market_close())
    schedule_function(trade, date_rules.week_start(), time_rules.market_open(minutes=30))

    
def handle_data(context, data):
    """No per-bar logic; trading is driven by the scheduled functions."""


def plot(context, data):
    """Record account leverage for the custom chart (runs daily at close)."""
    current_leverage = context.account.leverage
    record(leverage=current_leverage)

    
def ICA_weights(pd_returns):
    """Return LP-optimized weights for the securities in `pd_returns`.

    Extracts 10 ICA components from the log returns, regresses each
    security on them (plus an intercept), and passes the coefficient
    matrix to getweights(). Result is a Series indexed by security.
    """
    returns = np.log1p(pd_returns.values)
    sources = FastICA(10, random_state=1).fit_transform(returns)
    s = smapi.add_constant(sources)  # prepend intercept column for OLS

    # betas[i, :] = (intercept, loadings) of security i on the ICA sources.
    betas = np.zeros((np.shape(returns)[1], np.shape(s)[1]))

    for i in range(0, np.shape(returns)[1]):
        model = smapi.OLS(returns[:, i], s).fit()
        betas[i, :] = model.params
   
    return pd.Series(index=pd_returns.columns, data=getweights(betas))
    
def trade(context, data):
    
    prices = data.history(context.univ, "price", 252, "1d")
    prices = prices.dropna(axis=1)
    returns = prices.pct_change().dropna()
    
    results = pd.DataFrame()
    for _ in range(100):
        slice_vals = [np.random.randint(0, len(returns)), np.random.randint(0, len(returns))]
        start = np.min(slice_vals)
        end = np.max(slice_vals)
        label = (start, end)
        if end - start > 50 and label not in results.columns:
            current_rets = returns[start: end]
            try:
                results[label] = ICA_weights(current_rets)
            except:
                print "ICA Weighting Failed"
                continue
    
    
    W = results.T.mean()
    den = np.sum(np.abs(W))
    if den == 0:
        den = 1
    for security in W.index:
        if data.can_trade(security):
            order_target_value(security, W[security] / den * context.portfolio.portfolio_value * 2.8)
        
    for security in context.portfolio.positions:
        if security not in W.index and data.can_trade(security):
            order_target_percent(security, 0)
            
     

def getweights(params):
    """Solve an LP for portfolio weights from per-stock regression params.

    params : (m, n) array; column 0 holds OLS intercepts (expected-return
             proxies), columns 1..n-1 hold ICA factor loadings.
    Returns the cvxopt solution vector (may be None if the solver fails;
    the caller's try/except around ICA_weights absorbs that case).
    """
    (m,n) = np.shape(params)
    # Objective: minimize -intercept'x, i.e. maximize predicted return.
    c = -params[:, 0]
    c = matrix(c)
    # Equality constraint params.T x = 0: zero net exposure to each column.
    # NOTE(review): this includes the intercept column, which also forces
    # the objective to zero on the feasible set — possibly params[:, 1:].T
    # was intended; confirm against the author's intent.
    A = matrix(params.T)
    b = matrix(0.0, (n, 1))
    # Inequality G x <= h caps each weight at 10000; there is no lower
    # bound, so weights may be arbitrarily negative (short positions).
    G = np.eye(m)
    h = np.ones((m, 1)) * 10000
    res = lp(c=c, G=matrix(G), h=matrix(h, (m, 1)), A=A, b=b)    
    return res['x']

def before_trading_start(context, data):
    """Refresh the trading universe from today's pipeline output."""
    output = pipeline_output("top_500")
    context.univ = output.index

# Constants that need to be global

# Morningstar security_type code identifying common stock.
COMMON_STOCK= 'ST00000001'

# Morningstar sector code -> sector name (for reference; not referenced
# elsewhere in this listing).
SECTOR_NAMES = {
                 101: 'Basic Materials',
                 102: 'Consumer Cyclical',
                 103: 'Financial Services',
                 104: 'Real Estate',
                 205: 'Consumer Defensive',
                 206: 'Healthcare',
                 207: 'Utilities',
                 308: 'Communication Services',
                 309: 'Energy',
                 310: 'Industrials',
                 311: 'Technology' ,
                }

# Average Dollar Volume without nanmean, so that recent IPOs are truly removed
class ADV_adj(CustomFactor):
    """Average daily dollar volume over 252 days, counting missing days as 0.

    Unlike a nan-skipping mean, NaN closes are zeroed before averaging, so
    a stock with less than a year of history (a recent IPO) gets a
    proportionally lower value and falls below liquidity thresholds.
    """
    inputs = [USEquityPricing.close, USEquityPricing.volume]
    window_length = 252
    
    def compute(self, today, assets, out, close, volume):
        # Zero-out missing closes so they drag the plain mean down.
        close[np.isnan(close)] = 0
        out[:] = np.mean(close * volume, 0)
                


def universe_filters():
    """
    Create a Pipeline producing Filters implementing common acceptance criteria.

    Returns
    -------
    zipline.Filter
        Filter to control tradeability.
    """
    # Equities with an average daily dollar volume greater than 750000.
    high_volume = (AverageDollarVolume(window_length=252) > 750000)

    # Not Misc. sector:
    sector_check = Sector().notnull()

    # Equities that morningstar lists as primary shares.
    # NOTE: This will return False for stocks not in the morningstar database.
    primary_share = IsPrimaryShare()

    # Equities for which morningstar's most recent Market Cap value is above $300m.
    have_market_cap = mstar.valuation.market_cap.latest > 300000000

    # Equities not listed as depositary receipts by morningstar.
    not_depositary = ~mstar.share_class_reference.is_depositary_receipt.latest

    # Equities listed as common stock (as opposed to, say, preferred stock).
    # .eq produces a Filter: True where security_type == 'ST00000001'.
    common_stock = mstar.share_class_reference.security_type.latest.eq(COMMON_STOCK)

    # Equities whose exchange id does not start with OTC (Over The Counter).
    not_otc = ~mstar.share_class_reference.exchange_id.latest.startswith('OTC')

    # Equities whose symbol does not end with .WI ("When Issued" offerings).
    not_wi = ~mstar.share_class_reference.symbol.latest.endswith('.WI')

    # Equities whose company name does not end with 'LP' or a similar string.
    not_lp_name = ~mstar.company_reference.standard_name.latest.matches('.* L[\\. ]?P\.?$')

    # Equities with a null balance_sheet.limited_partnership field —
    # an alternative way of checking for LPs.
    not_lp_balance_sheet = mstar.balance_sheet.limited_partnership.latest.isnull()

    # Highly liquid assets only; also eliminates IPOs in the past 12 months
    # because ADV_adj treats unrecorded days as 0 (S&P criterion).
    liquid = ADV_adj() > 250000

    # Placeholder until global markets are supported (S&P criterion).
    domicile = True

    # Keep it to the most liquid securities.
    ranked_liquid = ADV_adj().rank(ascending=False) < 1500

    # FIX: the original conjunction included `liquid` twice; the duplicate
    # term was redundant (A & A == A) and has been removed.
    universe_filter = (high_volume & primary_share & have_market_cap & not_depositary &
                       common_stock & not_otc & not_wi & not_lp_name & not_lp_balance_sheet &
                       liquid & domicile & sector_check & ranked_liquid)

    return universe_filter
  
    
There was a runtime error.
Disclaimer

The material on this website is provided for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation or endorsement for any security or strategy, nor does it constitute an offer to provide investment advisory services by Quantopian. In addition, the material offers no opinion with respect to the suitability of any security or specific investment. No information contained herein should be regarded as a suggestion to engage in or refrain from any investment-related course of action as none of Quantopian nor any of its affiliates is undertaking to provide investment advice, act as an adviser to any plan or entity subject to the Employee Retirement Income Security Act of 1974, as amended, individual retirement account or individual retirement annuity, or give advice in a fiduciary capacity with respect to the materials presented herein. If you are an individual retirement or other investor, contact your financial advisor or other fiduciary unrelated to Quantopian about whether any given investment idea, strategy, product or service described herein may be appropriate for your circumstances. All investments involve risk, including loss of principal. Quantopian makes no guarantees as to the accuracy or completeness of the views expressed in the website. The views are subject to change, and may have become unreliable for various reasons, including changes in market conditions or economic circumstances.

Here is a notebook comparing the original algo modified to only trade the top 100 to my version with sampling. The way it changes positions is quite noticeably in comparison.

Loading notebook preview...
Notebook previews are currently unavailable.

Vlad why are you dissing other people's algos by posting other totally unrelated algos that have superficially similar characteristics? And it's not just you, there's a thick atmosphere of naively fatalistic skepticism these days, and the algo-peen one-upmanship has gotten out of hand. If someone's goal is to repeatedly try to demonstrate that algos are worthless, doing so on the message board of a company devoted to finding worthy algos seems like rude trolling, if not some kind of self-promotion (which has also been prevalent lately).

Pravin is one of the few people consistently continuing to share non-trivial ideas...

EDIT: You know what, forget it, it's none of my business. You all do what you like, I shouldn't criticize what people want to post on an open forum.

I've seen a variety of posts, as well, along the lines of "Why would anyone ever consider some complicated, beta-neutral algo dealing in large baskets of stocks when one could simply stir together a few ETFs, with perhaps a little spice, and call it a day?" I think it is reasonable to throw up a few baseline examples, as Vlad did. Asset allocation is a valid style of systematic investing, although perhaps not what Q is interested in at this point. Part of the issue, I think, is that Q has not really explained what they are doing, and why, and why the type of algos Vlad and others have posted are not attractive (or maybe they are--who knows). Certainly, if it is true that one can achieve similar long-term returns with the same or less leverage, then it is worth a head-scratch to consider if anything more sophisticated is justified.

As far as Pravin's post, it looks like something that might do o.k. in the contest, so more power to him. If someone can explain what it does in a few clear paragraphs, I'd be interested (no references to papers, no fancy math terms--simple, intuitive talk only). What's that code doing?

It's using Independent Component Analysis (the same tech that can be used to isolate specific conversations from recordings of cocktail parties, very cool) to identify 10 common independent drivers of the returns of the top 500 stocks, if I recall correctly. Then it's using a convex solver to minimize the portfolios residual exposure to all those factors/drivers.

Simon,
Pravin is one of the few people consistently continuing to share non-trivial ideas...
I agree with that.
For the rest you wrote..Who are you to judge?
But this is topic for another thread.

Here's James' version, over a longer time period. Seems like you'd just end up borrowing a lot of money to approximate SPY.

Clone Algorithm
1
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
import math
import numpy as np
import pandas as pd
from sklearn.decomposition import FastICA
import statsmodels.api as smapi
from cvxopt import matrix, solvers
from cvxopt.solvers import lp

from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.classifiers.morningstar import Sector
from quantopian.pipeline.factors import CustomFactor, AverageDollarVolume
from quantopian.pipeline.data import morningstar as mstar
from quantopian.pipeline.filters.morningstar import IsPrimaryShare

def make_pipeline():
    """Build a pipeline of the 100 largest stocks passing the
    tradability filters, exposing market cap as a column.
    """
    latest_cap = mstar.valuation.market_cap.latest
    pipe = Pipeline()
    pipe.add(latest_cap, "market_cap")
    # Screen down to the 100 largest names among those passing the filters.
    pipe.set_screen(latest_cap.top(100, mask=universe_filters()))
    return pipe
    

def initialize(context):
    """Wire up state, the pipeline, and the scheduled functions."""
    # Silence cvxopt's solver progress output.
    solvers.options['show_progress'] = False
    context.done = False
    context.univ = None
    attach_pipeline(make_pipeline(), 'top_500')
    # Record leverage at every close; rebalance weekly after the open.
    schedule_function(plot, date_rules.every_day(), time_rules.market_close())
    schedule_function(trade, date_rules.week_start(), time_rules.market_open(minutes=30))

    
def handle_data(context, data):
    """No per-bar logic; trading is driven by the scheduled functions."""


def plot(context, data):
    """Record account leverage for the custom chart (runs daily at close)."""
    current_leverage = context.account.leverage
    record(leverage=current_leverage)

    
def ICA_weights(pd_returns):
    """Return LP-optimized weights for the securities in `pd_returns`.

    Extracts 10 ICA components from the log returns, regresses each
    security on them (plus an intercept), and passes the coefficient
    matrix to getweights(). Result is a Series indexed by security.
    """
    returns = np.log1p(pd_returns.values)
    sources = FastICA(10, random_state=1).fit_transform(returns)
    s = smapi.add_constant(sources)  # prepend intercept column for OLS

    # betas[i, :] = (intercept, loadings) of security i on the ICA sources.
    betas = np.zeros((np.shape(returns)[1], np.shape(s)[1]))

    for i in range(0, np.shape(returns)[1]):
        model = smapi.OLS(returns[:, i], s).fit()
        betas[i, :] = model.params
   
    return pd.Series(index=pd_returns.columns, data=getweights(betas))
    
def trade(context, data):
    
    prices = data.history(context.univ, "price", 252, "1d")
    prices = prices.dropna(axis=1)
    returns = prices.pct_change().dropna()
    
    results = pd.DataFrame()
    for _ in range(100):
        slice_vals = [np.random.randint(0, len(returns)), np.random.randint(0, len(returns))]
        start = np.min(slice_vals)
        end = np.max(slice_vals)
        label = (start, end)
        if end - start > 50 and label not in results.columns:
            current_rets = returns[start: end]
            try:
                results[label] = ICA_weights(current_rets)
            except:
                print "ICA Weighting Failed"
                continue
    
    
    W = results.T.mean()
    den = np.sum(np.abs(W))
    if den == 0:
        den = 1
    for security in W.index:
        if data.can_trade(security):
            order_target_value(security, W[security] / den * context.portfolio.portfolio_value * 2.8)
        
    for security in context.portfolio.positions:
        if security not in W.index and data.can_trade(security):
            order_target_percent(security, 0)
            
     

def getweights(params):
    """Solve an LP for portfolio weights from per-stock regression params.

    params : (m, n) array; column 0 holds OLS intercepts (expected-return
             proxies), columns 1..n-1 hold ICA factor loadings.
    Returns the cvxopt solution vector (may be None if the solver fails;
    the caller's try/except around ICA_weights absorbs that case).
    """
    (m,n) = np.shape(params)
    # Objective: minimize -intercept'x, i.e. maximize predicted return.
    c = -params[:, 0]
    c = matrix(c)
    # Equality constraint params.T x = 0: zero net exposure to each column.
    # NOTE(review): this includes the intercept column, which also forces
    # the objective to zero on the feasible set — possibly params[:, 1:].T
    # was intended; confirm against the author's intent.
    A = matrix(params.T)
    b = matrix(0.0, (n, 1))
    # Inequality G x <= h caps each weight at 10000; there is no lower
    # bound, so weights may be arbitrarily negative (short positions).
    G = np.eye(m)
    h = np.ones((m, 1)) * 10000
    res = lp(c=c, G=matrix(G), h=matrix(h, (m, 1)), A=A, b=b)    
    return res['x']

def before_trading_start(context, data):
    """Refresh the trading universe from today's pipeline output."""
    output = pipeline_output("top_500")
    context.univ = output.index

# Constants that need to be global

# Morningstar security_type code identifying common stock.
COMMON_STOCK= 'ST00000001'

# Morningstar sector code -> sector name (for reference; not referenced
# elsewhere in this listing).
SECTOR_NAMES = {
                 101: 'Basic Materials',
                 102: 'Consumer Cyclical',
                 103: 'Financial Services',
                 104: 'Real Estate',
                 205: 'Consumer Defensive',
                 206: 'Healthcare',
                 207: 'Utilities',
                 308: 'Communication Services',
                 309: 'Energy',
                 310: 'Industrials',
                 311: 'Technology' ,
                }

# Average Dollar Volume without nanmean, so that recent IPOs are truly removed
class ADV_adj(CustomFactor):
    """Average daily dollar volume over 252 days, counting missing days as 0.

    Unlike a nan-skipping mean, NaN closes are zeroed before averaging, so
    a stock with less than a year of history (a recent IPO) gets a
    proportionally lower value and falls below liquidity thresholds.
    """
    inputs = [USEquityPricing.close, USEquityPricing.volume]
    window_length = 252
    
    def compute(self, today, assets, out, close, volume):
        # Zero-out missing closes so they drag the plain mean down.
        close[np.isnan(close)] = 0
        out[:] = np.mean(close * volume, 0)
                


def universe_filters():
    """
    Create a Pipeline producing Filters implementing common acceptance criteria.

    Returns
    -------
    zipline.Filter
        Filter to control tradeability.
    """
    # Equities with an average daily dollar volume greater than 750000.
    high_volume = (AverageDollarVolume(window_length=252) > 750000)

    # Not Misc. sector:
    sector_check = Sector().notnull()

    # Equities that morningstar lists as primary shares.
    # NOTE: This will return False for stocks not in the morningstar database.
    primary_share = IsPrimaryShare()

    # Equities for which morningstar's most recent Market Cap value is above $300m.
    have_market_cap = mstar.valuation.market_cap.latest > 300000000

    # Equities not listed as depositary receipts by morningstar.
    not_depositary = ~mstar.share_class_reference.is_depositary_receipt.latest

    # Equities listed as common stock (as opposed to, say, preferred stock).
    # .eq produces a Filter: True where security_type == 'ST00000001'.
    common_stock = mstar.share_class_reference.security_type.latest.eq(COMMON_STOCK)

    # Equities whose exchange id does not start with OTC (Over The Counter).
    not_otc = ~mstar.share_class_reference.exchange_id.latest.startswith('OTC')

    # Equities whose symbol does not end with .WI ("When Issued" offerings).
    not_wi = ~mstar.share_class_reference.symbol.latest.endswith('.WI')

    # Equities whose company name does not end with 'LP' or a similar string.
    not_lp_name = ~mstar.company_reference.standard_name.latest.matches('.* L[\\. ]?P\.?$')

    # Equities with a null balance_sheet.limited_partnership field —
    # an alternative way of checking for LPs.
    not_lp_balance_sheet = mstar.balance_sheet.limited_partnership.latest.isnull()

    # Highly liquid assets only; also eliminates IPOs in the past 12 months
    # because ADV_adj treats unrecorded days as 0 (S&P criterion).
    liquid = ADV_adj() > 250000

    # Placeholder until global markets are supported (S&P criterion).
    domicile = True

    # Keep it to the most liquid securities.
    ranked_liquid = ADV_adj().rank(ascending=False) < 1500

    # FIX: the original conjunction included `liquid` twice; the duplicate
    # term was redundant (A & A == A) and has been removed.
    universe_filter = (high_volume & primary_share & have_market_cap & not_depositary &
                       common_stock & not_otc & not_wi & not_lp_name & not_lp_balance_sheet &
                       liquid & domicile & sector_check & ranked_liquid)

    return universe_filter
  
    
There was a runtime error.

The problem with this algorithm is that we assume that, once we hedge the exposures, the stocks will outperform. We should probably instead find two portfolios (winners and losers), hedge their exposures, and go long one basket and short the other.

For the strategy I'm playing around with, I don't attempt to find two equal-weight baskets, one long and one short. Rather, the relative weights of the long and short baskets are not constrained (e.g. at any point in time, the algo could be all long, or all short, or an arbitrary ratio). Beta is reduced to ~ 0 by adding a position in an ETF (e.g. SPY).

What is the basic outline of your algo? From Simon's explanation, I don't understand the principle behind it. Why would you expect it to be profitable? Or is it just expected to be a smoothed version of SPY that then gets boosted with leverage?

You might try a much shorter time scale (e.g. 5 days of minutely data, perhaps smoothed?).

very impressive! Pravin

Thanks Lake Austin. It fails to perform during 2008-09 crisis and I am still working to find out why.

Well, no offense — my feeling is that no pure stock strategy could survive a crisis like 2008–09, and the best strategy is to avoid it.