Great Sharpe ratio with consumer staples sector strategy - TOO GOOD TO BE TRUE?

Hi all,

I recently implemented a VERY SIMPLE strategy on the consumer staples sector. It generates 736% total returns, a Sharpe ratio of 1.32, and a -34% max drawdown between 2003 and 2018. Obviously this strategy does not meet all the risk criteria of the Quantopian contest, but I'm thinking here more of personal use. Would anyone mind taking a look and telling me whether this strategy is too good to be true? If a common factor is this widely known and accessible, why has it not been used in the real world? Here is the strategy overview:

  • Filter: S&P 500
  • Commission: $5 per trade
  • Slippage: 5 basis points
  • Rebalance: monthly (at month start)
  • Position: long 10 stocks with the lowest P/E
  • Leverage: 1x

This is stupidly simple.... but just check it out :)

Clone Algorithm (33 clones)
[Backtest widget: metrics not rendered]
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import Fundamentals, factset
from quantopian.pipeline.filters import Q500US
from quantopian.pipeline.classifiers.fundamentals import Sector

MAX_GROSS_LEVERAGE = 1.0
TOTAL_POSITIONS = 10

def initialize(context):
    # Rebalance at the start of each month, at the open.
    schedule_function(func=rebalance,
                      date_rule=date_rules.month_start(),
                      time_rule=time_rules.market_open())
    attach_pipeline(make_pipeline(), 'CONSUMER_DEFENSIVE')

    set_commission(commission.PerTrade(5))
    set_slippage(slippage.FixedBasisPointsSlippage(basis_points=5))

def make_pipeline():
    POR = factset.Fundamentals.pay_out_ratio_af.latest
    PE = Fundamentals.pe_ratio.latest
    PFCF = Fundamentals.fcf_ratio.latest

    # S&P-500-like universe, consumer defensive names only.
    universe = Q500US() & Sector.eq(Sector(), Sector.CONSUMER_DEFENSIVE) #& (POR <= 0.0000000000001)
    #filter1 = PFCF.bottom(20, universe)
    longs = PE.bottom(10, universe)   # 10 lowest-P/E names in the universe
    return Pipeline(
        columns={
            #'POR': POR,
            'longs': longs,
            'sector': Sector(),
        },
        screen=universe & longs
    )

def before_trading_start(context, data):
    context.pipeline_output = pipeline_output('CONSUMER_DEFENSIVE')
    context.longs = context.pipeline_output[context.pipeline_output['longs']].index

def rebalance(context, data):
    my_positions = context.portfolio.positions
    long_weight = 1.0 / TOTAL_POSITIONS

    # Exit anything that fell out of the low-P/E list.
    for security in my_positions:
        if security not in context.longs and data.can_trade(security):
            order_target_percent(security, 0)

    # Equal-weight the current low-P/E names.
    for security in context.longs:
        if data.can_trade(security):
            if security not in my_positions:
                order_target_percent(security, long_weight)
        else:
            log.info("Didn't open position")

Hello Nam, these are impressive numbers! Returns like these aren't impossible to achieve through algorithmic trading, but it is also smart to make sure our algos can survive and still perform under pressure.

Does this one still perform the same with other dollar amounts ($50k, $100k, $1M, $5M), or larger? How does it do over 1-year intervals?

You're certainly on to something here!

The past returns look good because consumer defensive was the best-performing sector during the recession. The real question is: do you still beat the market after taxes?

There are brief dips into margin of up to $4M (see the custom chart), most likely from partial fills, so scheduling sells some time before buys may help.
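A minimal sketch of that staggering idea, using the same Quantopian scheduling calls as the posted algorithm (close_stale and open_new are my names; context.longs and TOTAL_POSITIONS come from the original code):

# Hypothetical split of rebalance(): sells run at the open, buys ~30
# minutes later, so exits can fill before new capital is committed.
def initialize(context):
    schedule_function(close_stale, date_rules.month_start(),
                      time_rules.market_open())
    schedule_function(open_new, date_rules.month_start(),
                      time_rules.market_open(minutes=30))

def close_stale(context, data):
    # Exit names that dropped off the target list first.
    for security in context.portfolio.positions:
        if security not in context.longs and data.can_trade(security):
            order_target_percent(security, 0)

def open_new(context, data):
    # Enter or true-up the target names after sells have had time to fill.
    long_weight = 1.0 / TOTAL_POSITIONS
    for security in context.longs:
        if data.can_trade(security):
            order_target_percent(security, long_weight)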

Clone Algorithm (13 clones)
[Backtest widget: metrics not rendered]
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import Fundamentals, factset
from quantopian.pipeline.filters import Q500US
from quantopian.pipeline.classifiers.fundamentals import Sector

MAX_GROSS_LEVERAGE = 1.0
TOTAL_POSITIONS = 10

def initialize(context):
    schedule_function(func = rebalance,
                      date_rule = date_rules.month_start(),
                      time_rule = time_rules.market_open(),
                      )
    attach_pipeline(make_pipeline(),'CONSUMER_DEFENSIVE')

    set_commission(commission.PerTrade(5))
    set_slippage(slippage.FixedBasisPointsSlippage(basis_points=5))

    context.csh_low = context.portfolio.cash
    context.mxlv = 0
    # Track leverage and cash extremes at every minute of the trading day.
    for i in range(1, 391):
        schedule_function(extremes, date_rules.every_day(), time_rules.market_open(minutes=i))

def make_pipeline():
    #POR = factset.Fundamentals.pay_out_ratio_af.latest
    PE = Fundamentals.pe_ratio.latest
    #PFCF = Fundamentals.fcf_ratio.latest

    #sp_500 = Q500US()

    universe = Q500US() & Sector.eq(Sector(), Sector.CONSUMER_DEFENSIVE) #& (POR <= 0.0000000000001)
    #filter1 = PFCF.bottom(20, universe)
    longs = PE.bottom(10, universe)
    return Pipeline(
        columns={
            #'POR': POR,
            'pe'    : PE,
            'longs' : longs,
            'sector': Sector(),
        },
        screen =  universe & longs
    )

def before_trading_start(context, data):
    context.pipeline_output = pipeline_output('CONSUMER_DEFENSIVE')
    context.longs = context.pipeline_output[context.pipeline_output['longs']].index

    if context.portfolio.cash != context.portfolio.starting_cash:
        record(cash = context.portfolio.cash)

    do_log_preview = 0    # a way to toggle this off when it becomes annoying
    if do_log_preview:
        try:    context.log_data_done
        except: log_data(context, context.pipeline_output, 9)        # show pipe info once
        
def rebalance(context,data):
    my_positions = context.portfolio.positions
    long_weight = 1.0 / TOTAL_POSITIONS

    for security in my_positions:
        if not data.can_trade(security): continue
        if security not in context.longs:
            order_target_percent(security, 0)

    for security in context.longs:
        if not data.can_trade(security):
            log.info("Didn't open position")
            continue
        if security not in my_positions:
            order_target_percent(security, long_weight)

def extremes(context, data):
    if context.account.leverage > context.mxlv:
        context.mxlv = context.account.leverage
        record(MaxLv = context.mxlv)
    if context.portfolio.cash < context.csh_low:
        context.csh_low = context.portfolio.cash
        record(CashLow = context.csh_low)

def log_data(context, z, num, fields=None):
    ''' Log info about pipeline output or, z can be any DataFrame or Series
    https://quantopian.com/posts/overview-of-pipeline-content-easy-to-add-to-your-backtest
    '''
    if not len(z):
        log.info('Empty pipe')
        return

    try: context.log_data_done
    except:
        log.info('starting_cash ${:,}   portfolio ${:,}     {} positions ...'.format(
            int(context.portfolio.cash),
            int(context.portfolio.portfolio_value),
            len(context.portfolio.positions)
        ))
        context.log_data_done = 1

    # Options
    log_nan_only = 0          # Only log if nans are present.
    show_sectors = 0          # If sectors, see them or not.
    show_sorted_details = 1   # [num] high & low securities sorted, each column.
    padmax = 6                # num characters for each field, starting point.

    def out(lines):  # log data lines of output efficiently
        buffer_len = 1024   # each group
        chunk = ':'
        for line in lines:
            if line is None or not len(line):
                continue    # skip if empty string for example
            if len(chunk) + len(line) < buffer_len:
                # Add to chunk if will still be under buffer_len
                chunk += '\n{}'.format(line)
            else:  # Or log chunk and start over with new line.
                log.info(chunk)
                chunk = ':\n{}'.format(line)
        if len(chunk) > 2:       # if anything remaining
            log.info(chunk)

    if 'MultiIndex' in str(type(z.index)):
        log.info('Found MultiIndex, not set up to handle it, bailing out of log_data()')
        return
    # Change index to just symbols for readability, meanwhile, right-aligned
    z = z.rename(index=dict(zip(z.index.tolist(), [i.symbol.rjust(6) for i in z.index.tolist()])))

    # Series ......
    if 'Series' in str(type(z)):    # is Series, not DataFrame
        nan_count = len(z[z != z])
        nan_count = 'NaNs {}/{}'.format(nan_count, len(z)) if nan_count else ''
        if (log_nan_only and nan_count) or not log_nan_only:
            pad = max( padmax, len('%.5f' % z.max()) )
            log.info('{}{}{}   Series  len {}'.format('min'.rjust(pad+5),
                'mean'.rjust(pad+5), 'max'.rjust(pad+5), len(z)))
            log.info('{}{}{} {}'.format(
                ('%.5f' % z.round(6). min()).rjust(pad+5),
                ('%.5f' % z.round(6).mean()).rjust(pad+5),
                ('%.5f' % z.round(6). max()).rjust(pad+5),
                nan_count
            ))
            log.info('High\n{}'.format(z.sort_values(ascending=False).head(num)))
            log.info('Low\n{}' .format(z.sort_values(ascending=False).tail(num)))
        return

    # DataFrame ......
    content_min_max = [ ['','min','mean','max',''] ] ; content = []
    for col in z.columns:
        try: z[col].max()
        except: continue   # skip non-numeric
        if col == 'sector' and not show_sectors: continue
        nan_count = len(z[col][z[col] != z[col]])
        nan_count = 'NaNs {}/{}'.format(nan_count, len(z)) if nan_count else ''
        padmax    = max( padmax, len(str(z[col].max())) ) ; mean_ = ''
        if len(str(z[col].max())) > 8 and 'float' in str(z[col].dtype):
            z[col] = z[col].round(6)   # Reduce number of decimal places for floating point values
        if 'float' in str(z[col].dtype): mean_ = str(round(z[col].mean(), 6))
        elif 'int' in str(z[col].dtype): mean_ = str(round(z[col].mean(), 1))
        content_min_max.append([col, str(z[col] .min()), mean_, str(z[col] .max()), nan_count])
    if log_nan_only and nan_count or not log_nan_only:
        log.info('Rows: {}  Columns: {}'.format(z.shape[0], z.shape[1]))
        if len(z.columns) == 1: content.append('Rows: {}'.format(z.shape[0]))

        paddings = [6 for i in range(4)]
        for lst in content_min_max:    # set max lengths
            i = 0
            for val in lst[:4]:    # value in each sub-list
                paddings[i] = max(paddings[i], len(str(val)))
                i += 1
        headr = content_min_max[0]
        content.append(('{}{}{}{}{}'.format(
             headr[0] .rjust(paddings[0]),
            (headr[1]).rjust(paddings[1]+5),
            (headr[2]).rjust(paddings[2]+5),
            (headr[3]).rjust(paddings[3]+5),
            ''
        )))
        for lst in content_min_max[1:]:    # populate content using max lengths
            content.append(('{}{}{}{}     {}'.format(
                lst[0].rjust(paddings[0]),
                lst[1].rjust(paddings[1]+5),
                lst[2].rjust(paddings[2]+5),
                lst[3].rjust(paddings[3]+5),
                lst[4],
            )))
    out(content)

    if not show_sorted_details: return
    if len(z.columns) == 1:     return     # skip detail if only 1 column
    details = z.columns if fields is None else fields   # fall back to all columns
    content = []
    for detail in details:
        if detail == 'sector' and not show_sectors: continue
        hi = z[details].sort_values(by=detail, ascending=False).head(num)
        lo = z[details].sort_values(by=detail, ascending=False).tail(num)
        content.append(('_ _ _   {}   _ _ _'  .format(detail)))
        content.append(('{} highs ...\n{}'.format(detail, str(hi))))
        content.append(('{} lows  ...\n{}'.format(detail, str(lo))))
        if log_nan_only and not len(lo[lo[detail] != lo[detail]]):
            continue  # skip if no nans
    out(content)


Hey Jack Grebin, I've tested it with different cash amounts (smaller funds apparently led to lower leverage because there are no partial fills) and the total returns are still spectacular.

Hey Legendary Mode, do you know any good method to incorporate taxes on Quantopian? Much appreciated!

Hi Nam,
I understand your comment about criteria, and the caveat "for personal use" rather than Q contests, and that's all OK. As you say, for a very simple strategy it looks almost too good to be true ... but it still might be true and, all other things being equal, simpler is generally better. For personal use, I'm not sure that scaling is necessarily a problem and, for some people/entities, taxes may not necessarily be an issue either. OK, so far, so good. However, what concerns me most is the question of HOW exactly you actually came up with this "strategy".

I hope this question will provoke some thoughtful discussion not only about Nam's specific strategy mentioned here, but also about the whole concept of system strategy design in general.

If you already know that something (e.g. a single star-performer stock, sector or whatever) performed well over some piece of past history, then is selecting it actually a "strategy" at all, or is it just driving by looking in the rear-view mirror? Demonstrating that something performed well in the past is not necessarily a valid form of "back-testing", and it is neither a necessary nor a sufficient condition to reassure anyone that it will continue to perform well in the future. What might lead you to believe that the strategy Nam found here (or, alternatively, any other stock, ETF or whatever that performed well in the past) is at all likely to continue to be useful going forward?

Hi Tony Morland,

I appreciate your point. I think that nobody can "predict" the future. That is why we need to look deeply into the past because that is all we got.

Whether to call it a "strategy" or not depends on your definition. To me, a strategy is any reasonable hypothesis that can be tested and duplicated easily.

Nam, there isn't an easy way to incorporate taxes in Quantopian. However, you can approximate the after-tax returns:
\begin{align}
1 + r_{\text{annual}} &= (1 + 7.36)^{1/15} \approx 1.152 \\
r_{\text{after-tax}} &\approx 0.152 \, (1 - \text{tax rate})
\end{align}
With a 40% tax rate you make about 9% a year, or roughly 264% over 15 years, vs. 305.88% for the SPY ETF.
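If it helps, here is the same arithmetic in a few lines of Python (the 7.36 total return, 15 years and 40% rate are the numbers from this thread; compounding the exact after-tax rate gives roughly 271%, which matches the ~264% above once the annual figure is rounded down to 9%):

# Rough after-tax sanity check of the numbers quoted above.
total_return = 7.36    # 736% over the backtest
years = 15
tax_rate = 0.40

annual = (1 + total_return) ** (1.0 / years) - 1        # ~0.152
after_tax_annual = annual * (1 - tax_rate)              # ~0.091
after_tax_total = (1 + after_tax_annual) ** years - 1   # ~2.71, i.e. ~271%

print('annual %.1f%%, after tax %.1f%%, compounded %.0f%%' % (
    annual * 100, after_tax_annual * 100, after_tax_total * 100))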

Hi Nam,
Yes, I believe you're right, no one can actually predict the future. Most who claim to are charlatans, and if anyone can, they sure aren't telling us :)) Therefore I also agree with the corollary that, as you say: "we need to look deeply into the past because that is all we got".
The semantics of what we call a "strategy" is not really a key issue here, and I certainly have no argument with your definition.

However, what I am really trying to do is elicit discussion, whether from you or from anyone else, about HOW we can best go about looking at the past so as to form the most useful (i.e., most likely to succeed going forward) trading strategies.

Here are some "for examples":
Do we have any understanding of the process and/or the reasons why price behaved a certain way in the past, and why that might or might not continue to be applicable going forward? Putting this another way: if we have some sort of underlying conceptual model, does this help us to believe that our strategy might be robust?

Did we (you, Nam, in this case) actually have an idea FIRST and then test it with UNSEEN data, or did we simply look at a bunch of past data? Yes, you are right in the sense that the past is all we have, but the way we choose to handle it is likely to affect the quality of our conclusions, and therefore of our strategy or trading system development. Please allow me to illustrate with an amusing story, a trader's joke:

Three old friends go to a casino in the Australian outback where an illegal gambling game called "Two-Up" is being played, with betting based on the outcome of tossing some coins. The 3 friends watch in amazement as the tossed coins come up 14 Heads in a row. The first friend is a bartender and he says: "OK, now I'm gonna bet on Tails. The Law of Averages says that 14 Heads in a row is nearly impossible, so the next throw has GOTTA come up Tails, just by the Law of Averages!" The 2nd friend is a Statistics Professor and he says: "No, no, you are wrong, and you are making a very common mistake. The individual coin tosses are independent trials and the next outcome has no dependence whatsoever on prior outcomes. So the chance of H or T being next is 50/50, irrespective of however many of anything just came up in a row beforehand." The 3rd friend is a Trader. He says: "Nah, you're both wrong and I'm gonna bet on Heads. No way that it's an unbiased coin!!" :)) OK, so which of them is right, based on the observed data that they have?

Moral of the story: Be careful what we conclude from our data! What else might be going on? Maybe some "hidden variables" for example?

Hi Tony Morland,

The hidden variable here, I think, is the characteristics of sectors/industries. There are a lot of books/papers out there that give advice on how to filter out the best stocks based on some fundamental data, while disregarding how those ratios apply differently to different sectors.

Now, here is what I think is an underlying conceptual model for developing ANY strategy:
1) Form a reasonable hypothesis. For instance: RESPECTABLE tech companies that have a low dividend payout ratio reinvest their money and will have higher returns in the future.
2) Choose some factors/ratios that may express that hypothesis.
3) Write code, develop the strategy, and backtest over at least 1 bull and 1 bear market (see the sketch after this list).
4) Tweak and combine that factor with other factors to see whether it is influenced by something else and is not actually the "elementary" factor at play.
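For step 3, a minimal pandas sketch of the regime check, assuming you have a daily returns series exported from a backtest (regime_stats and the date windows are my illustrations, not from the thread):

import numpy as np
import pandas as pd

def regime_stats(returns, regimes):
    # returns: pd.Series of daily strategy returns with a DatetimeIndex.
    # regimes: dict mapping a label to a (start, end) date-string pair.
    rows = {}
    for label, (start, end) in regimes.items():
        r = returns.loc[start:end]
        ann_return = (1 + r).prod() ** (252.0 / len(r)) - 1   # annualized return
        sharpe = np.sqrt(252) * r.mean() / r.std()            # annualized Sharpe
        rows[label] = {'ann_return': ann_return, 'sharpe': sharpe}
    return pd.DataFrame(rows).T

# e.g. regime_stats(my_returns, {'bear': ('2007-10-01', '2009-03-09'),
#                                'bull': ('2012-01-01', '2014-12-31')})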

I did not look at a bunch of data. This strategy was inspired by a book. Trying to test every single ratio out there, or coming up with new factors (as a lot of traders do with technical indicators), is simply ineffective and wasteful.

Thank you for the story. I am trying to use quantitative finance in a new direction - fundamental analysis. Besides, I think Quantopian's contest algorithm and their risk metrics can only apply to big funds. They are forcing us to use long/short strategy and that's a shame. Here is the thing: if you have an opportunity to long a good stock, why bother shorting a bad one?

Best,
Nam

Hi Nam,

Yes, I agree that many fundamental ratios apply in different ways to different sectors. Within any given sector, it can often be very helpful to normalize individual company ratios by, or compare them with, the sector median value.
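In Pipeline terms, that within-sector comparison can be done with a grouped transform. A minimal sketch, reusing the pe_ratio factor from Nam's code (note that Pipeline's built-in demean normalizes by the sector mean; a median version would need a CustomFactor):

from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import Fundamentals
from quantopian.pipeline.filters import Q500US
from quantopian.pipeline.classifiers.fundamentals import Sector

def make_sector_relative_pipeline():
    universe = Q500US()
    pe = Fundamentals.pe_ratio.latest
    # Demean P/E within each sector, so "cheap" means cheap relative
    # to sector peers rather than to the whole market.
    pe_vs_sector = pe.demean(mask=universe, groupby=Sector())
    longs = pe_vs_sector.bottom(10, mask=universe)
    return Pipeline(
        columns={'pe_vs_sector': pe_vs_sector, 'longs': longs},
        screen=universe & longs,
    )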

Your conceptual model for strategy development makes good sense.

With regard to item 1) "respectable tech companies", and fundamental ratios, a good case-study example is Amazon. Based on PE ratios in the past, Amazon has been "expensive". However Amazon's profits were relatively low COMPARED to what they could have been for one very good reason: Amazon was plowing (= ploughing, depending on where you come from :)) a lot of its earnings back into well-considered R&D expenditure. So although Amazon LOOKED "expensive", what it was really doing was making a conscious choice in setting itself up very carefully for the strongest possible long-term future, rather than maximizing profits in the short-term. Smart CEO there!

Would you care to share with us what was the book that inspired you? Maybe some of us have read it too....

Regarding your comments on Q's contest, equity long/short, etc., the fact is this: Q is running a fund, and the purpose of the contest is to source a diverse range of algos they can use in it. The people who invest in Q's fund want stable returns with very low drawdowns (DD) which are uncorrelated with movements of the general market. This is most easily achieved with a L/S strategy, so that's why they use it and why it forms the basis of the contest. The rationale behind going Long a good stock and simultaneously Shorting a bad one is to remain market neutral, just in case the whole market takes an unexpected dive!

On the other hand, for strictly personal use, you might have a much higher tolerance for DDs and for returns that are correlated with the overall market, especially if you are using a system that you developed yourself, tested carefully, and are confident about. Just to share my own situation: I enjoy entering Q's contests from time to time, and I also write trading systems for my own personal use. The two types (for Q vs. for myself) are very, very different in character. The ones for Q are equity L/S, and the ones for myself are not. No problem. Sometimes the comparisons between the two are quite interesting.

Cheers, best wishes, Tony

Hi Tony Morland,

The book I'm using is called "The Lazy Fundamental Analyst" by Fred Piard.

I am wondering: if these strategies are so simple, have they already been used substantially in the market? Let me know if you find something.

Also, I have noticed that you are a great contributor to Quantopian's forum. Are you involved with Quantopian in any way? Have you won any contest? Why are you so committed - I'm curious...

Nam

Hi Nam,
I haven't seen that book but I will check it out. Thanks.

There are some good modern books on using fundamental analysis in trading. In particular, I would definitely recommend the following as among the best. I have coded up a lot of the ideas in them for use in my own personal trading systems for stocks (not here on Q, and not in Python, which I am not really very skilled in using).
Recommended books:
-- "Quantitative Value" by Wesley Gray & Tobias Carlisle, 2013 (Wiley).
-- "Your Complete Guide to factor-Based Investing" by Andrew Berkin & Larry Swedroe, 2016 (BAM Alliance Press).

Yes, I believe all of these strategies are already substantially and widely used, but they are still quite sound and effective, and so definitely worth looking at.
Also, if you have any background or skills related to understanding corporate financial statements, that can help a lot in learning to read between the lines a bit. Companies are not always 100% honest with their financial reporting, and there are lots of different ways for them to "cook the books", especially with regard to reported Earnings. That is, I believe, the main reason why calculations involving PE ratios are not always as successful as we might hope ... but there are ways around that problem, one of which is sketched below.
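One simple workaround, offered as an illustration rather than as Tony's specific method: rank on free cash flow instead of reported earnings, since cash flow is harder to massage. Nam's posted code already computes this factor as PFCF:

from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import Fundamentals
from quantopian.pipeline.filters import Q500US

def make_pfcf_pipeline():
    universe = Q500US()
    # Price-to-free-cash-flow (the PFCF factor commented out in the
    # posted algorithm); low values flag cheapness on a cash basis.
    pfcf = Fundamentals.fcf_ratio.latest
    longs = pfcf.bottom(10, mask=universe)
    return Pipeline(columns={'pfcf': pfcf, 'longs': longs},
                    screen=universe & longs)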

Thank you for your kind words about my contributions to Q's forum. I am not involved with Quantopian (Q) at all in the sense of working for them. I am just an individual trader, trading my own personal account, very much in my own ways. Sometimes I look at Q's forums, and sometimes I don't look for months because I am busy with other things.

Yes, I have received some small cash winnings from several of Q's contests, although I am not really a regular with them and sometimes I don't even bother to look to see what my entries are doing! I have not (yet :)) received an allocation from Q for licensing the use of any of my algos in their fund. Actually my "commitment" here is somewhat sporadic in terms of time, but when I do write to the forum, my efforts in what I write are indeed often quite intense.

Here is an honest statement of what I think of Q, of the forum, and of the people who write here:
I believe that Q is genuine in its stated intention to "democratize finance", and people like Delaney McKenzie and Max Magenot, to name just two, are doing a great job, as well as being very fine people whom I have met several times at workshops in Singapore. As to how well the whole concept will work, only time will tell, but Q is certainly providing some excellent free education and tools for people to use.

I think that most of the people who write on the forum here are "dabblers": maybe smart, but often either lacking practical experience in trading or too "academic" in their outlook, and there are some people on whom I will not waste my time. Nevertheless, there is a small handful of contributors here who are smart, skilled, original thinkers, and very practically oriented with regard to trading. Some of these people have become "online friends" for me here in the forum, even though I have never met them. It is mostly to/for them that I write, as well as for any potential new friends.

Most traders, like most people, tend to just do the same familiar things over and over again, and often simply repeat like parrots what they have read, without much critical thinking or active investigation of their own. So, just as most trading books are repetitive, sometimes wrong, and often not worth reading, the same goes for a lot of the people who talk about trading. I am always hoping to find or meet new people who are intelligent, inquisitive and innovative. If, by sharing my ideas openly with like-minded people here, we can help each other to generate just one or two good, innovative new trading ideas a year, then the effort is definitely worthwhile. In a way, my "commitment" here in the Q forum is really just self-interest :)) Does that answer your question? Feel welcome to ask more if you want to. Best wishes, Tony.

Hi Tony,

Thank you for your thoughtful response! I am very impressed with what you are doing and your intent to help others on the forum.

I have picked up "Your Complete Guide to Factor-Based Investing" by Andrew Berkin & Larry Swedroe, which you suggested, and read through about 30% of it. I think I should repeat the most important message of the book here, which is what makes a "factor" good:

  1. Persistent - the factor delivers a return premium over long periods and across different economic regimes.
  2. Pervasive - it holds across countries, sectors, and asset classes.
  3. Investable - it holds even after trading costs, etc.
  4. Robust - it holds for various related definitions built on the same theme.
  5. Intuitive - it has some sort of logical or behavioral explanation.

I think these 5 criteria are very important to keep in mind when searching for a strategy. They help us avoid data-mining and over-fitting.

To that end, I re-tested my algorithm, but I found a couple of issues that I'd like to consult you on.

1) When I backtested the same consumer staples strategy over only 2015-2018, the algorithm returned less than the benchmark. I looked at the positions, and it seems the 15-year run and the 3-year run both hold the same stocks during 2015-2018. What is the right way to look at this? Does my algorithm pass the "persistent" test because it generates some premium over a long time, or is it invalid because it has underperformed for the last 3 years?

2) I also backtested single-factor strategies screened by sector. Namely, I used fundamental factors like PE, PS, PB, PFCF, EPS growth 1Y, Sales Growth Q/Q, Gross Margin, ROA, ROE, Current Ratio, Payout Ratio, etc. Each strategy used only one of the factors listed above. The goal is to eliminate noise and scientifically control variables.

Some factors work really well with certain sectors and some don't. For instance, a low payout ratio works extremely well with the Tech sector, while PE works well with Healthcare. Here is the link to my results spreadsheet: https://docs.google.com/spreadsheets/d/1qBKQ6IEJlNSzQpWRPnHRsYuCNoTlP-bSK41JXHCUygw/edit?usp=sharing

I'd love for you to look at this and think together about the next step of combining different factors. If you are free, I'd prefer to have a Skype call so we can learn more from each other. Message me on Quantopian :)
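For what it's worth, here is a sketch of how that factor-by-sector sweep could be parameterized so each run changes exactly one variable (make_single_factor_pipeline and the factor dictionary are my illustration; only valuation ratios, where lower is better, are shown):

from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import Fundamentals
from quantopian.pipeline.filters import Q500US
from quantopian.pipeline.classifiers.fundamentals import Sector

# A subset of the factors listed above; all are "lower is better".
FACTORS = {
    'pe'  : Fundamentals.pe_ratio.latest,
    'ps'  : Fundamentals.ps_ratio.latest,
    'pb'  : Fundamentals.pb_ratio.latest,
    'pfcf': Fundamentals.fcf_ratio.latest,
}

def make_single_factor_pipeline(factor_name, sector_code, n=10):
    # One factor and one sector per run, to keep the comparison controlled.
    universe = Q500US() & Sector().eq(sector_code)
    longs = FACTORS[factor_name].bottom(n, mask=universe)
    return Pipeline(columns={'longs': longs}, screen=universe & longs)

# e.g. make_single_factor_pipeline('pe', Sector.HEALTHCARE)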

Clone Algorithm (33 clones)
[Backtest widget: metrics not rendered]
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import Fundamentals, factset
from quantopian.pipeline.filters import Q500US
from quantopian.pipeline.classifiers.fundamentals import Sector

MAX_GROSS_LEVERAGE = 1.0
TOTAL_POSITIONS = 10

def initialize(context):
    # Note: this version rebalances every day rather than monthly.
    schedule_function(func=rebalance,
                      date_rule=date_rules.every_day(),
                      time_rule=time_rules.market_open())
    attach_pipeline(make_pipeline(), 'CONSUMER_DEFENSIVE')

    set_commission(commission.PerTrade(5))
    set_slippage(slippage.FixedBasisPointsSlippage(basis_points=5))

def make_pipeline():
    POR = factset.Fundamentals.pay_out_ratio_af.latest
    NPM = Fundamentals.net_margin.latest
    PE = Fundamentals.pe_ratio.latest
    NAME = Fundamentals.legal_name.latest

    sp_500 = Q500US()

    universe = sp_500 & Sector.eq(Sector(), Sector.CONSUMER_DEFENSIVE) #& (POR <= 0.0000000000001)
    longs = PE.bottom(10, universe)
    return Pipeline(
        columns={
            #'POR': POR,
            'longs': longs,
            'sector': Sector(),
            'PE': PE,
            'NAME': NAME,
        },
        screen=universe & longs
    )

def before_trading_start(context, data):
    context.pipeline_output = pipeline_output('CONSUMER_DEFENSIVE')
    print context.pipeline_output   # dump today's picks to the log
    context.longs = context.pipeline_output[context.pipeline_output['longs']].index

def rebalance(context, data):
    my_positions = context.portfolio.positions
    long_weight = 1.0 / TOTAL_POSITIONS

    # Exit names that dropped out of the low-P/E list.
    for security in my_positions:
        if security not in context.longs and data.can_trade(security):
            order_target_percent(security, 0)

    # Equal-weight the current low-P/E names.
    for security in context.longs:
        if data.can_trade(security):
            if security not in my_positions:
                order_target_percent(security, long_weight)
        else:
            log.info("Didn't open position")