Back to Community
My last shared algorithm - Good luck all

After several years, I have decided to stop using Quantopian. Here is an algorithm that uses copula models; I hope you find it useful.
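For anyone new to the approach: the signal is the conditional probability, under a fitted Gaussian copula, that a stock's return percentile falls below its observed value given the percentiles of common factor returns; values persistently away from 0.5 mark the stock as rich or cheap relative to its factors. Below is a minimal sketch of that idea on synthetic data, with one factor instead of the algorithm's three PCA factors (all names are illustrative, not taken from the algorithm):

import numpy as np
import scipy.stats as st

rng = np.random.RandomState(0)

# Synthetic data: one common factor and a stock return partly driven by it.
n = 500
factor = rng.randn(n)
ret = 0.6 * factor + 0.8 * rng.randn(n)

# Map each margin to uniforms with a fitted Student-t, as the algorithm does.
u_f = st.t.cdf(factor, *st.t.fit(factor))
u_r = st.t.cdf(ret, *st.t.fit(ret))

# Transform the uniforms to normal scores and estimate the copula correlation.
x, y = st.norm.ppf(u_f), st.norm.ppf(u_r)
rho = np.corrcoef(x, y)[0, 1]

# Conditional CDF of the bivariate Gaussian copula; the score() function in
# the algorithm generalizes this closed form to several factors.
h = st.norm.cdf((y - rho * x) / np.sqrt(1.0 - rho ** 2))

# Cumulative deviation from 0.5 over a recent window is the mispricing score.
print((h - 0.5)[-45:].sum())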

[Attached backtest — Clone Algorithm (266 clones); the embedded performance metrics were not captured.]
import math
import numpy as np
import cvxpy as cvx
import pandas as pd
import scipy as sp
import statsmodels.api as smapi
from sklearn.decomposition import PCA
from sklearn.cluster import affinity_propagation as AF
from sklearn.covariance import OAS

import quantopian.optimize as opt
import quantopian.algorithm as algo
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.algorithm import attach_pipeline, pipeline_output
from statsmodels.distributions.empirical_distribution import ECDF
from quantopian.pipeline.experimental import QTradableStocksUS, risk_loading_pipeline 
from quantopian.pipeline.classifiers.morningstar import Sector
from quantopian.pipeline.factors.morningstar import MarketCap
from quantopian.pipeline.data import morningstar as mstar

def make_pipeline():
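    # Universe: QTradableStocksUS names with a last close above $10, tagged
    # with their Morningstar sector for per-sector clustering.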
    minprice = USEquityPricing.close.latest > 10
    pipe = Pipeline(screen=QTradableStocksUS() & minprice)
    sectors = Sector()
    pipe.add(sectors, 'sector')
    return pipe
    
def initialize(context):
    context.sectorStocks = {}
    context.stocks = None
    context.alphas = None
    
    algo.set_slippage(slippage.FixedBasisPointsSlippage())
    
    context.sector_ids = [ Sector.BASIC_MATERIALS,
                           Sector.CONSUMER_CYCLICAL,
                           Sector.FINANCIAL_SERVICES,
                           Sector.REAL_ESTATE,
                           Sector.CONSUMER_DEFENSIVE,
                           Sector.HEALTHCARE,
                           Sector.UTILITIES,
                           Sector.COMMUNICATION_SERVICES,
                           Sector.ENERGY,
                           Sector.INDUSTRIALS,
                           Sector.TECHNOLOGY ]
    
    context.leverage = 1.
    context.days = 60
    algo.schedule_function(trade_sectors, 
                      date_rules.every_day(), 
                      time_rules.market_open(minutes=15))
    
    algo.schedule_function(update_chart, 
                      date_rules.every_day(), 
                      time_rules.market_close(minutes=1))
    
    algo.attach_pipeline(make_pipeline(), 'pipe')
    algo.attach_pipeline(risk_loading_pipeline(), 'risk_loading_pipeline')
    
def handle_data(context, data):
    pass

def before_trading_start(context, data):
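    # Rebuild the sector clusters roughly every 60 trading days; refresh the
    # copula scores (compute) every day.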
    if context.days < 60:
        context.days += 1
        compute(context, data)
        return
  
    context.days = 0
    context.output = algo.pipeline_output("pipe")
    context.risk_loading_pipeline = algo.pipeline_output('risk_loading_pipeline')
    context.sectorStocks.clear()
    
    for sector_id in context.sector_ids:
        stocks = context.output[context.output.sector == sector_id].index
        context.sectorStocks[sector_id] = get_cluster(context, data, stocks)
    compute(context, data)
    
def get_cluster(context, data, stocks):
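    # Cluster the sector's stocks with affinity propagation, using the
    # OAS-shrunk covariance of weekly log returns as the similarity matrix;
    # clusters of 30 or fewer names are dropped, larger ones are split into
    # chunks of at most 30.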
    prices = data.history(stocks, "price", 250, "1d").resample('W').last()
    prices = prices.dropna(axis=1)
    returns = np.log1p(prices.pct_change().dropna().values)
    cov = OAS().fit(returns).covariance_
    _, labels = AF(cov)
    
    clusters = {}
    for i, stock in enumerate(prices.columns):
        label = labels[i]
        if label not in clusters:
            clusters[label] = []
        clusters[label].append(stock)    
    
    retval = []
    
    for c in clusters:
        members = clusters[c]
        # Clusters of 30 or fewer stocks are dropped; larger ones are split
        # into chunks of at most 30 (q >= 1 here, so the split always runs).
        if len(members) > 30:
            q = len(members) // 30
            for i in range(q):
                retval.append(members[(i * 30):(i + 1) * 30])
            
    return retval

def compute(context, data):
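    # Score every cluster, stack the results into a single universe, and give
    # each cluster a group label whose net-exposure bounds (+/-3 before
    # scaling) are divided equally across the number of groups.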
    context.stocks = None
    context.alphas = None
    context.sectors = {}
    context.labels = {}
    context.minw = {}
    context.maxw = {}
    context.label = 0    
    
    context.univ = None
    
    for sector_id in context.sector_ids:
        for cluster in context.sectorStocks[sector_id]:
            if context.univ is None:
                context.univ = cluster
            else:
                context.univ = np.hstack((context.univ, cluster))
                
    for sector_id in context.sector_ids:
        for cluster in context.sectorStocks[sector_id]:
            stocks, alphas = find_weights(context, data, cluster)
            
            
            context.label += 1
            if context.stocks is None:
                context.stocks = stocks
                context.alphas = alphas
            else:
                context.stocks = np.hstack((context.stocks, stocks))
                context.alphas = np.hstack((context.alphas, alphas))
                
            for sid in stocks:
                context.sectors[sid] = sector_id
                context.labels[sid] = context.label
            context.minw[context.label] = -3.0
            context.maxw[context.label] = 3.0
            
    for i in range(1, context.label+1):
        context.minw[i] /= context.label
        context.maxw[i] /= context.label
    
def trade_sectors(context, data):
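    # Daily rebalance: maximize the copula alphas subject to gross exposure,
    # dollar neutrality, per-cluster net-exposure bounds, the risk model,
    # position concentration, and turnover constraints.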
    todays_universe = context.stocks
    alphas = pd.Series(context.alphas, index=todays_universe)
    alphas = alphas.fillna(0.)
    objective = opt.MaximizeAlpha(alphas)
    
    constraints = []

    constraints.append(opt.MaxGrossExposure(1.0))
    constraints.append(opt.DollarNeutral(0.01))
    constraints.append(opt.NetGroupExposure(context.labels, context.minw, context.maxw))
                       
    constraint_sector_style_risk = opt.experimental.RiskModelExposure(
        context.risk_loading_pipeline,
        version=opt.Newest,
    )
    constraints.append(constraint_sector_style_risk)
    constraints.append(
        opt.PositionConcentration.with_equal_bounds(
            min=-1. / (context.label * 5), 
            max=1. / (context.label * 5)))
    constraints.append(opt.MaxTurnover(0.6)) 
    try:
        algo.order_optimal_portfolio(objective=objective, constraints=constraints)
        record(e=0)
    except Exception:
        # The optimizer can fail (e.g. infeasible constraints); log and skip.
        record(e=1)

def find_weights(context, data, stocks):
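    # Fit a factor copula per cluster: extract 3 PCA factors from daily log
    # returns, push every margin through a fitted Student-t CDF to get
    # uniforms, then score each stock by its conditional Gaussian-copula CDF
    # given the factors. The sum of deviations from 0.5 over the last 45 days
    # is the alpha, sign-flipped to bet on mean reversion.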
    prices = data.history(stocks, "price", 120, "1d")
    prices = prices.dropna(axis=1)
    ret = np.log1p(prices.pct_change().dropna().values)
    factors = PCA(3).fit_transform(ret)
    retU = np.zeros(ret.shape) 
    scores = np.zeros(ret.shape[1])
    
    factorsU = np.zeros(factors.shape)
    for j in range(0, factors.shape[1]):
        params = sp.stats.t.fit(factors[:, j])
        factorsU[:, j] = sp.stats.t.cdf(factors[:, j], *params)
    
    for i in range(0, ret.shape[1]):
        params = sp.stats.t.fit(ret[:, i])
        retU[:, i] = sp.stats.t.cdf(ret[:, i], *params)
        
        corr = np.eye(factors.shape[1] + 1)
        
        for j in range(0, factors.shape[1]):
            corr[-1, j] = np.corrcoef(factors[:, j], ret[:, i])[0, 1]
            corr[j, -1] = corr[-1, j]
        s = score(corr, factorsU, retU[:, i]) - 0.5
        scores[i] = s[-45:].sum()
    
    return prices.columns.values, -scores

def update_chart(context,data):
    record(leverage = context.account.leverage)

    longs = shorts = 0
    
    for position in context.portfolio.positions.values():
        if position.amount > 0:
            longs += 1
        if position.amount < 0:
            shorts += 1
            
    record(long_lever=longs, short_lever=shorts)

def pearson_from_spearman(s):
    # Unused helper: Pearson correlation implied by a Spearman correlation
    # under bivariate normality.
    return 2. * np.sin(s * np.pi / 6.)

def cov2corr(A):
    # Convert a covariance matrix to the corresponding correlation matrix.
    d = np.sqrt(A.diagonal())
    return ((A.T / d).T) / d

def pd_inv(a):
    # Unused helper: inverse and determinant of a symmetric positive-definite
    # matrix (SVD singular values are reversed to line up with eigh's
    # ascending eigenvalue order).
    _, evc = np.linalg.eigh(a)
    u, evl, v = np.linalg.svd(a)
    d = np.diag(1. / evl[::-1])
    return np.dot(np.dot(evc, d), evc.T), np.prod(evl)

def norm_pdf_multivariate(x, cov):
    dim = x.shape[1]
    mean = np.zeros(dim)
    prec_U, log_det_cov = _psd_pinv_decomposed_log_pdet(cov)
    out = np.exp(_logpdf(x, mean, prec_U, log_det_cov))
    return out.squeeze()

def score(rho, factorsU, retU):
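    # Conditional Gaussian copula CDF. With X the factors' normal scores and
    # y the stock's normal score, integrate the joint normal density over
    # values up to y: completing the square in y gives exp(-A/2) * Phi(H),
    # normalized by t2; dividing by the marginal density of X (D) yields
    # P(U_ret <= u_ret | factors) for each observation.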
    X = sp.stats.norm.ppf(factorsU)
    y = sp.stats.norm.ppf(retU)
    irho, det = np.linalg.inv(rho), np.linalg.det(rho)
    adder = irho[-1, -1]
    edges = np.dot(X, irho[:-1, -1] + irho[-1, :-1].T)
    H = np.sqrt(adder) * y + edges / (2.0 * np.sqrt(adder))
    subA = np.sum(np.dot(X, irho[:-1,:-1]) * X, axis=1)
    A = subA - (edges * edges) / (4. * adder)
    t1 = np.exp(-0.5 * A) * sp.stats.norm.cdf(H)
    t2 = math.pow((2. * math.pi), (0.5 * factorsU.shape[1])) * math.pow(abs(det), 0.5) * np.sqrt(adder)
    N = t1 / t2
    D = norm_pdf_multivariate(X, rho[:-1, :-1])
    Y = N / D
    return Y

def _logpdf(x, mean, prec_U, log_det_cov):
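    # This and _psd_pinv_decomposed_log_pdet below mirror scipy's internal
    # multivariate-normal machinery: the log-pdf is evaluated through a
    # pseudo-inverse square root of the covariance from its eigendecomposition.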
    _LOG_2PI = np.log(2 * np.pi)
    dim = x.shape[-1]
    dev = x - mean
    maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
    return -0.5 * (dim * _LOG_2PI + log_det_cov + maha)

def _psd_pinv_decomposed_log_pdet(mat, cond=None, rcond=None,
                                  lower=True, check_finite=True):
    s, u = sp.linalg.eigh(mat, lower=lower, check_finite=check_finite)

    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        t = u.dtype.char.lower()
        factor = {'f': 1E3, 'd': 1E6}
        cond = factor[t] * np.finfo(t).eps
    eps = cond * np.max(abs(s))

    if np.min(s) < -eps:
        raise ValueError('the covariance matrix must be positive semidefinite')

    s_pinv = _pinv_1d(s, eps)
    U = np.multiply(u, np.sqrt(s_pinv))
    log_pdet = np.sum(np.log(s[s > eps]))

    return U, log_pdet

def _pinv_1d(v, eps=1e-5):
    return np.array([0 if abs(x) < eps else 1/x for x in v], dtype=float)
7 responses

Godspeed Aqua Rooster, thanks for all of your great contributions.

We miss you Aqua Rooster.

Take care, Pravin. Hope you find greener pastures.

Here's the algo above, run over the most recent 2 years.

[Attached backtest of the same algorithm over the most recent 2 years — Clone Algorithm (57 clones); the embedded performance metrics were not captured. The posted source is identical to the algorithm above.]

Best of luck to you always Aqua Rooster. Thanks for your contribution here. Will miss you.

Dan

Sad to see you go. I've learned a lot from your posts over the last year, even though I've never posted anything myself. I hope you find success in your next venture. Take care mate, and thank you for everything.

Best of luck Pravin. Hope you reconsider in the future.