\[ \newcommand{\argmin}{\operatorname{argmin}} \newcommand{\sign}{\operatorname{sign}} \newcommand{\diag}[1]{\operatorname{diag}(#1)} \newcommand{\prox}[2]{\operatorname{prox}_{#1}(#2)} \]
The horseshoe+ prior, as a scale mixture of normals, is given by \[ \begin{align*} (\theta_j | \lambda_j, \tau) &\sim \operatorname{N}(0, \lambda_j^2) \\ \lambda_j &\sim \operatorname{C}^+(0,\eta_j \tau) \\ \eta_j &\sim \operatorname{C}^+(0,1) \end{align*} \] The original horseshoe is \[ \begin{align*} (\theta_j | \lambda_j, \tau) &\sim \operatorname{N}(0, \lambda_j^2) \\ \lambda_j &\sim \operatorname{C}^+(0,\tau) \end{align*} \]
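To make the hierarchy concrete, here is a minimal R sketch of drawing from the horseshoe+ prior; the function name rhsplus and the default \(\tau = 1\) are illustrative choices, not part of any package. Half-Cauchy draws are obtained by taking absolute values of Cauchy draws.
# Sketch: draw n values of theta_j from the horseshoe+ prior (tau fixed by the caller).
rhsplus = function(n, tau = 1) {
  eta = abs(rcauchy(n, 0, 1))             # eta_j ~ C+(0, 1)
  lambda = abs(rcauchy(n, 0, eta * tau))  # lambda_j ~ C+(0, eta_j * tau)
  rnorm(n, 0, lambda)                     # theta_j ~ N(0, lambda_j^2)
}
Dropping the eta level gives the corresponding draw for the original horseshoe.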
In the following simulations we generate data according to \[ \begin{align*} (y \mid \theta) &\sim \operatorname{N}\left(\theta, \mathbf{I}\right) \end{align*} \] and, in the horseshoe and horseshoe+ models, the global scale is given the prior \(\tau \sim \operatorname{C}^+(0,1)\).
Qn = 1   # number of non-zero components
A = 10   # magnitude of the non-zero components
J = 100  # dimension of the signal
theta.true = c(rep(A, Qn), rep(0, J - Qn))
We construct a signal of dimension \(J=100\) with a single non-zero component of magnitude \(A=10\).
We generate the data with the following:
test.data = list('J' = J, 'K' = 1,
                 'y' = rnorm(J, theta.true, 1))  # y_j ~ N(theta_j, 1)
First, we load the necessary R packages:
library(rstan)
set_cppo("fast")  # compiler optimization flag; deprecated in recent versions of rstan
library(ggplot2)
library(plyr)
library(reshape2)
The horseshoe+ model is implemented in Stan using a non-centered parameterization: with \(z_j \sim \operatorname{N}(0,1)\) (theta_step below) and \(\lambda_j, \eta_j \sim \operatorname{C}^+(0,1)\), setting \(\theta_j = \tau\,\eta_j\,\lambda_j\,z_j\) gives \((\theta_j \mid \lambda_j, \eta_j, \tau) \sim \operatorname{N}\left(0, (\tau\eta_j\lambda_j)^2\right)\) with \(\tau\eta_j\lambda_j \sim \operatorname{C}^+(0, \eta_j\tau)\), which matches the hierarchy above. The Stan code is the following:
stan.hsplus.code = "
data {
  int<lower=0> J;
  vector[J] y;
}
parameters {
  vector[J] theta_step;
  vector<lower=0>[J] lambda;
  vector<lower=0>[J] eta;
  real<lower=0> tau;
}
transformed parameters {
  vector[J] theta;
  // non-centered parameterization: theta_j = tau * eta_j * lambda_j * theta_step_j
  theta <- ((theta_step .* lambda) .* eta) * tau;
}
model {
  // the <lower=0> constraints make these half-Cauchy priors
  tau ~ cauchy(0, 1);
  eta ~ cauchy(0, 1);
  lambda ~ cauchy(0, 1);
  theta_step ~ normal(0, 1);
  y ~ normal(theta, 1);
}
"
and the regular horseshoe:
stan.hs.code = "
data {
  int<lower=0> J;
  vector[J] y;
}
parameters {
  vector[J] theta_step;
  vector<lower=0>[J] lambda;
  real<lower=0> tau;
}
transformed parameters {
  vector[J] theta;
  theta <- (theta_step .* lambda) * tau;
}
model {
  tau ~ cauchy(0, 1);
  lambda ~ cauchy(0, 1);
  theta_step ~ normal(0, 1);
  y ~ normal(theta, 1);
}
"
and the diffuse \(\operatorname{N}(0,300)\) prior (variance 300, i.e. standard deviation \(\sqrt{300}\)):
stan.norm.code = "
data {
  int<lower=0> J;
  vector[J] y;
}
parameters {
  vector[J] theta;
}
model {
  theta ~ normal(0, sqrt(300));
  y ~ normal(theta, 1);
}
"
It’s necessary to compile the Stan models (we use Clang):
stan.hsplus.fit = stan_model(model_code=stan.hsplus.code, model_name="hs+ cauchy")
stan.hs.fit = stan_model(model_code=stan.hs.code, model_name="hs cauchy")
stan.norm.fit = stan_model(model_code=stan.norm.code, model_name="normal")
n.iters = 1500
n.chains = 1
Stan is run with 1 chain of 1500 iterations (by default, rstan discards the first half of each chain as warm-up).
smpls.hsplus.res = sampling(stan.hsplus.fit,
                            data = test.data,
                            iter = n.iters,
                            #init = 0,
                            #seed = rng.seed,
                            chains = n.chains)
theta.smpls.hsplus = extract(smpls.hsplus.res, pars=c("theta"), permuted=TRUE)[[1]]
hsplus.sample.data = melt(extract(smpls.hsplus.res, permuted=TRUE))
colnames(hsplus.sample.data) = c("iteration", "component", "value", "variable")
hist.hsplus.ci.pct = 0.60  # histogram truncation quantile
Next, we plot a histogram of the \(\theta\) sum of squares for the prior and posterior samples, truncated at the 60% quantile of the combined samples so that the heavy prior tail does not dominate the plot:
post.sum.theta.hsplus = apply(theta.smpls.hsplus, 1, function(x) crossprod(x))
prior.sum.theta.hsplus = replicate(nrow(theta.smpls.hsplus),
{
  # draw from the horseshoe+ prior: tau ~ C+(0,1), eta_j ~ C+(0,1),
  # lambda_j ~ C+(0, eta_j * tau), theta_j ~ N(0, lambda_j^2)
  tau = rcauchy(1, 0, 1)
  eta = rcauchy(J, 0, 1)
  lambda = rcauchy(J, 0, abs(tau * eta))
  theta = rnorm(J, 0, abs(lambda))
  return(crossprod(theta))
})
sum.dist.hsplus = rbind(data.frame(type="prior", value=prior.sum.theta.hsplus),
                        data.frame(type="posterior", value=post.sum.theta.hsplus))
hist.quants.hsplus = quantile(sum.dist.hsplus$value, probs=c(hist.hsplus.ci.pct, 1.0-hist.hsplus.ci.pct))
hist.data.hsplus = subset(sum.dist.hsplus, 0 < value & value < max(hist.quants.hsplus))
hist.breaks.hsplus = hist(hist.data.hsplus$value, plot=FALSE, breaks="Scott")$breaks
ggplot(hist.data.hsplus, aes(x=value, group=type)) +
  geom_histogram(aes(fill=type, y=..density..), breaks=hist.breaks.hsplus) +
  xlab("sum")
The summary statistics for the prior and posterior sum samples, respectively:
##              Min.   1st Qu.   Median          Mean   3rd Qu.             Max.                sd
## prior          89     36218   390121   50455223370   4975787   32421947760000   1190336815195.4
## posterior   49.59     94.15   109.23        109.86    125.46           181.37             23.20
smpls.hs.res = sampling(stan.hs.fit,
                        data = test.data,
                        iter = n.iters,
                        #init = 0,
                        #seed = rng.seed,
                        chains = n.chains)
theta.smpls.hs = extract(smpls.hs.res, pars=c("theta"), permuted=TRUE)[[1]]
hs.sample.data = melt(extract(smpls.hs.res, permuted=TRUE))
colnames(hs.sample.data) = c("iteration", "component", "value", "variable")
hist.hs.ci.pct = 0.70  # histogram truncation quantile
Next, we plot a histogram of the \(\theta\) sum of squares for the prior and posterior samples, truncated at the 70% quantile of the combined samples:
post.sum.theta.hs = apply(theta.smpls.hs, 1, function(x) crossprod(x))
prior.sum.theta.hs = replicate(nrow(theta.smpls.hs),
{
  # draw from the horseshoe prior: tau ~ C+(0,1), lambda_j ~ C+(0, tau),
  # theta_j ~ N(0, lambda_j^2)
  tau = rcauchy(1, 0, 1)
  lambda = rcauchy(J, 0, abs(tau))
  theta = rnorm(J, 0, abs(lambda))
  return(crossprod(theta))
})
sum.dist.hs = rbind(data.frame(type="prior", value=prior.sum.theta.hs),
                    data.frame(type="posterior", value=post.sum.theta.hs))
hist.quants.hs = quantile(sum.dist.hs$value, probs=c(hist.hs.ci.pct, 1.0-hist.hs.ci.pct))
hist.data.hs = subset(sum.dist.hs, 0 < value & value < max(hist.quants.hs))
hist.breaks.hs = hist(hist.data.hs$value, plot=FALSE, breaks="Scott")$breaks
ggplot(hist.data.hs, aes(x=value, group=type)) +
  geom_histogram(aes(fill=type, y=..density..), breaks=hist.breaks.hs) +
  xlab("sum")
The summary statistics for the prior and posterior sum samples, respectively:
##              Min.   1st Qu.    Median         Mean   3rd Qu.              Max.              sd
## prior        81.8    1801.8   16320.7   2337635840   255297.5   1340189075000.0   49879953110.5
## posterior   57.48     96.37    110.39       111.72     126.59            188.45           21.90
smpls.norm.res = sampling(stan.norm.fit,
                          data = test.data,
                          iter = n.iters,
                          #init = 0,
                          #seed = rng.seed,
                          chains = n.chains)
theta.smpls.norm = extract(smpls.norm.res, pars=c("theta"), permuted=TRUE)[[1]]
norm.sample.data = melt(extract(smpls.norm.res, permuted=TRUE))
colnames(norm.sample.data) = c("iteration", "component", "value", "variable")
Next, we produce a histogram of the \(\theta\) sum of squares for the prior and posterior distributions:
post.sum.theta.norm = apply(theta.smpls.norm, 1, function(x) crossprod(x))
prior.sum.theta.norm = replicate(nrow(theta.smpls.norm),
{
  # draw from the N(0, 300) prior, i.e. standard deviation sqrt(300)
  theta = rnorm(J, 0, sqrt(300))
  return(crossprod(theta))
})
sum.dist.norm = rbind(data.frame(type="prior", value=prior.sum.theta.norm),
                      data.frame(type="posterior", value=post.sum.theta.norm))
hist.data.norm = sum.dist.norm
hist.breaks.norm = hist(hist.data.norm$value, plot=FALSE, breaks="Scott")$breaks
ggplot(hist.data.norm, aes(x=value, group=type)) +
  geom_histogram(aes(fill=type, y=..density..), breaks=hist.breaks.norm) +
  xlab("sum")
The summary statistics for the prior and posterior sum samples, respectively:
##              Min.   1st Qu.   Median     Mean   3rd Qu.     Max.      sd
## prior       60.56     89.74    99.03    99.51    107.77   155.46   13.99
## posterior  207.97    268.79   290.13   291.40    311.24   391.48   30.95
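As a quick sanity check on these last numbers (a back-of-the-envelope conjugate calculation, not part of the simulation code): under the \(\operatorname{N}(0,300)\) prior each component has the standard conjugate posterior \[ \begin{align*} (\theta_j \mid y_j) &\sim \operatorname{N}\left(\tfrac{300}{301}\, y_j,\ \tfrac{300}{301}\right) \\ \operatorname{E}\left[\textstyle\sum_j \theta_j^2 \mid y\right] &= \textstyle\sum_j \left(\tfrac{300}{301}\right)^2 y_j^2 + J \cdot \tfrac{300}{301} \approx \textstyle\sum_j y_j^2 + 100 \end{align*} \] and since \(\operatorname{E}\left[\sum_j y_j^2\right] = A^2 + J = 200\), the posterior sum of squares should sit near 300, consistent with the sample mean of about 291 above. The horseshoe and horseshoe+ posterior sums instead concentrate much closer to the true \(\|\theta\|^2 = A^2 = 100\).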