Let me offer a candidate solution to this very interesting question, using the example data below. To accommodate the penalized VAR I use the recently released BigVAR package. At this point it does not come with the usual additional functionality such as FEVDs, historical decompositions or forecast fan charts, but the reduced-form estimates can be recovered through cv.BigVAR. That is what I do in the first step below. I will leave it to you to fine-tune the reduced-form estimation using the package's functionality.
# BigVAR ----
library(BigVAR)
library(expm)
library(data.table)
# Create model
data(Y)
p = 4 # lags
k = ncol(Y) # number of equations
N = nrow(Y) - p # number of obs
# Create a Basic VAR-L (Lasso Penalty) with maximum lag order p=4, 10 gridpoints with lambda optimized according to rolling validation of 1-step ahead MSFE
mod1 = constructModel(Y,p,"Basic",gran=c(150,10),RVAR=FALSE,h=1,cv="Rolling",MN=FALSE,verbose=FALSE,IC=TRUE)
results = cv.BigVAR(mod1)
# Get model estimates
A = results@betaPred[,2:ncol(results@betaPred)] # (k x (k*p)) = (3 x (3*4)) coefficient matrix (reduced form), intercept dropped
# Residual covariance matrix. Note: varres is the unpenalized vars::VAR() fit from Edit 1 below;
# in practice, substitute the residuals of your penalized model here.
Sigma = crossprod(resid(varres))/(N-(k*p)-1)
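# Quick dimension check (base R only, just an extra sanity check):
# A should be k x (k*p), i.e. [A_1, ..., A_p] without the intercept column
stopifnot(nrow(A) == k, ncol(A) == k * p)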
From there we can use the standard formulas to compute the FEVD. For an accessible introduction I highly recommend Chapter 4 of "Structural Vector Autoregressive Analysis" by Lutz Kilian and Helmut Lütkepohl. Everything we need is implemented below, from the reduced-form IRFs to the MSPE and finally the formula for computing the FEVD in R. This is a lot to take in, and I have not had time to polish this code, so it can most likely be done more efficiently, but hopefully it is still informative. Note that I use a standard Cholesky decomposition to identify the VAR, so the usual caveats about variable ordering apply.
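For reference, these are the quantities the helpers below are meant to compute, in roughly the notation of Kilian and Lütkepohl: with $P$ the lower-triangular Cholesky factor of $\Sigma_u$ and $\Theta_i = \Phi_i P$ the orthogonalized IRF at horizon $i$ (so that Theta[[i]] in the code corresponds to $\Theta_{i-1}$),

$$\mathrm{MSPE}_k(h) = \sum_{i=0}^{h-1}\sum_{j=1}^{K}\theta_{kj,i}^2, \qquad \mathrm{FEVD}_{kj}(h) = \frac{\sum_{i=0}^{h-1}\theta_{kj,i}^2}{\mathrm{MSPE}_k(h)},$$

where $\theta_{kj,i}$ is the $(k,j)$ element of $\Theta_i$ and $K$ is the number of variables.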
# A number of helper functions ----
# Compute reduced-form IRFs:
compute_Phi = function(p, k, A_comp, n.ahead) {
  # Selection matrix J picks the first k rows/columns of the companion form
  J = matrix(0, nrow = k, ncol = k * p)
  diag(J) = 1
  Phi = lapply(1:n.ahead, function(i) {
    J %*% (A_comp %^% (i - 1)) %*% t(J) # Phi_{i-1}, so Phi[[1]] is the horizon-0 IRF (identity)
  })
  return(Phi)
}
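# Toy check of compute_Phi with illustrative inputs: for a VAR(1) the companion matrix
# is A itself and the reduced-form IRFs are simply powers of A
A_toy = matrix(c(0.5, 0.1, 0.2, 0.3), 2, 2)
Phi_toy = compute_Phi(p = 1, k = 2, A_comp = A_toy, n.ahead = 3)
all.equal(Phi_toy[[1]], diag(2))          # horizon 0: identity
all.equal(Phi_toy[[3]], A_toy %*% A_toy)  # horizon 2: A^2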
# Compute orthogonalized IRFs:
compute_theta_kj_sq = function(Theta, n.ahead) {
  theta_kj_sq = lapply(1:n.ahead, function(h) { # loop over the h forecast horizons
    out = sapply(1:ncol(Theta[[h]]), function(k) {
      # Cumulative sum of squared orthogonalized IRFs up to horizon h,
      # so that out[j, k] = sum_{i=1}^{h} Theta_i[k, j]^2
      terms_to_sum = lapply(1:h, function(i) {
        Theta[[i]][k, ]^2
      })
      theta_kj_sq_h = Reduce(`+`, terms_to_sum)
    })
    colnames(out) = colnames(Theta[[h]])
    return(out)
  })
  return(theta_kj_sq)
}
# Compute mean squared prediction error:
compute_mspe = function(Theta, n.ahead = 10) {
  mspe = lapply(1:n.ahead, function(h) {
    # MSPE matrix at horizon h: sum of Theta_i %*% t(Theta_i) over i = 1, ..., h
    terms_to_sum = lapply(1:h, function(i) {
      tcrossprod(Theta[[i]])
    })
    mspe_h = Reduce(`+`, terms_to_sum)
  })
  return(mspe)
}
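# Toy consistency check with illustrative inputs: at every horizon the diagonal of the
# MSPE matrix should equal the column sums of the squared orthogonalized IRFs, since
# both sum theta_{kj,i}^2 over shocks j and horizons i
set.seed(42)
Theta_toy = lapply(1:3, function(i) matrix(rnorm(4), 2, 2))
theta_sq_toy = compute_theta_kj_sq(Theta_toy, n.ahead = 3)
mspe_toy = compute_mspe(Theta_toy, n.ahead = 3)
all(sapply(1:3, function(h) {
  isTRUE(all.equal(unname(diag(mspe_toy[[h]])), unname(colSums(theta_sq_toy[[h]]))))
}))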
# Function for FEVD:
fevd = function(A, Sigma, n.ahead) {
  k = dim(A)[1]     # number of equations
  p = dim(A)[2] / k # number of lags
  # Turn into companion form:
  if (p > 1) {
    A_comp = BigVAR::VarptoVar1MC(A, p, k)
  } else {
    A_comp = A
  }
  # Compute orthogonalized IRFs and MSPE: ----
  Phi = compute_Phi(p, k, A_comp, n.ahead)
  P = t(chol.default(Sigma)) # lower-triangular Cholesky factor
  B_0 = solve(P)             # identification matrix
  Theta = lapply(1:length(Phi), function(i) {
    Phi[[i]] %*% solve(B_0)  # orthogonalized IRFs: Theta_i = Phi_i %*% P
  })
  theta_kj_sq = compute_theta_kj_sq(Theta, n.ahead) # squared orthogonalized IRFs
  mspe = compute_mspe(Theta, n.ahead)
  # Compute percentage contributions (i.e. FEVDs): ----
  fevd_list = lapply(1:k, function(k) {
    t(sapply(1:length(mspe), function(h) {
      mspe_k = mspe[[h]][k, k]
      theta_k_sq = theta_kj_sq[[h]][, k]
      fevd = theta_k_sq / mspe_k
    }))
  })
  # Tidy up
  fevd_tidy = data.table::rbindlist(
    lapply(1:length(fevd_list), function(k) {
      fevd_k = data.table::melt(data.table::data.table(fevd_list[[k]])[, h := .I], id.vars = "h", variable.name = "j")
      fevd_k[, k := paste0("V", k)]
      data.table::setcolorder(fevd_k, c("k", "j", "h"))
    })
  )
  return(fevd_tidy)
}
Finally, let's run the function with n.ahead=20 and plot the results.
fevd_res = fevd(A, Sigma, 20)
library(ggplot2)
p = ggplot(data = fevd_res) +
  geom_area(aes(x = h, y = value, fill = j)) +
  facet_wrap(k ~ .) +
  scale_x_continuous(
    expand = c(0, 0)
  ) +
  scale_fill_discrete(
    name = "Shock"
  ) +
  labs(
    y = "Variance contribution",
    x = "Forecast horizon"
  ) +
  theme_bw()
p
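As a quick extra sanity check, the variance shares of each variable should sum to one at every horizon, so the following line should return an empty data.table:

# Check that the FEVD shares sum to one for every (variable, horizon) pair
fevd_res[, .(total = sum(value)), by = .(k, h)][abs(total - 1) > 1e-8]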
I hope this helps. If you have any follow-up questions, just shout.
One final note: I have checked the FEVD function against the output for a standard VAR estimated with the vars package that you mention in your question (see below). But that is as far as my "unit testing" has gone. The code has not been reviewed by anyone, so keep that in mind and perhaps dig into the formulas yourself. If you or anyone else spots any mistakes, I would be grateful for the feedback.
Edit 1
For completeness, here is a quick comparison of the output of vars::fevd and the custom function above.
# Compare to vars package ----
library(vars)
p = 4 # lags
k = ncol(Y) # number of equations
N = nrow(Y) - p
colnames(Y) = sprintf("V%i", 1:ncol(Y))
n.ahead = 20
varres = vars::VAR(Y,p) # reduced-form model estimated with the vars package; the vars:: prefix makes explicit which package the function comes from
# Get estimates for custom fevd function:
Sigma = crossprod(resid(varres))/(N-(k*p)-1)
A = t(
  sapply(coef(varres), function(i) {
    i[, 1] # point estimates for each equation
  })
)
A = A[, 1:(ncol(A) - 1)] # drop the intercept column
# Run the two different functions:
fevd_pkg = vars::fevd(varres, n.ahead)
fevd_cus = fevd(A, Sigma, n.ahead)
Now compare the output for the first variable:
> # Package:
> head(fevd_pkg$V1)
V1 V2 V3
[1,] 1.0000000 0.00000000 0.00000000
[2,] 0.9399842 0.01303013 0.04698572
[3,] 0.9422918 0.01062750 0.04708065
[4,] 0.9231440 0.01409313 0.06276291
[5,] 0.9305901 0.01335727 0.05605267
[6,] 0.9093144 0.01278727 0.07789833
> # Custom:
> head(dcast(fevd_cus[k=="V1"], k+h~j, value.var = "value"))
k h V1 V2 V3
1: V1 1 1.0000000 0.00000000 0.00000000
2: V1 2 0.9399842 0.01303013 0.04698572
3: V1 3 0.9422918 0.01062750 0.04708065
4: V1 4 0.9231440 0.01409313 0.06276291
5: V1 5 0.9305901 0.01335727 0.05605267
6: V1 6 0.9093144 0.01278727 0.07789833
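The same check can be done programmatically for all variables and horizons; the maximum absolute difference between the two implementations should be at numerical precision (cus_wide below is just an illustrative intermediate name):

max(sapply(1:k, function(i) {
  # Reshape the custom output for variable Vi to wide format and compare to the package output
  cus_wide = dcast(fevd_cus[k == paste0("V", i)], h ~ j, value.var = "value")
  max(abs(fevd_pkg[[paste0("V", i)]] - as.matrix(cus_wide)[, -1]))
}))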
Edit 2
To obtain the generalized FEVD in R without relying on the output of vars::VAR(), you can use the following function. I have recycled frequencyConnectedness::genFEVD.
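For reference, the function below is meant to compute the generalized FEVD of Pesaran and Shin (1998) with the usual row normalization, using only the reduced-form IRFs $\Phi_i$ and the residual covariance matrix $\Sigma$ (with $e_k$ the $k$-th selection vector and $\sigma_{jj}$ the $j$-th diagonal element of $\Sigma$):

$$\tilde{\theta}_{kj}(H) = \frac{\sigma_{jj}^{-1}\sum_{i=0}^{H}\left(e_k' \Phi_i \Sigma e_j\right)^2}{\sum_{i=0}^{H} e_k' \Phi_i \Sigma \Phi_i' e_k}, \qquad \theta_{kj}(H) = \frac{\tilde{\theta}_{kj}(H)}{\sum_{j=1}^{K}\tilde{\theta}_{kj}(H)}.$$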
# Generalized FEVD ----
library(frequencyConnectedness)
genFEVD_cus = function(
  A,
  Sigma,
  n.ahead,
  no.corr = FALSE
) {
  k = dim(A)[1]     # number of equations
  p = dim(A)[2] / k # number of lags
  # Turn into companion form:
  if (p > 1) {
    A_comp = BigVAR::VarptoVar1MC(A, p, k)
  } else {
    A_comp = A
  }
  # Set off-diagonals to zero:
  if (no.corr) {
    Sigma = diag(diag(Sigma))
  }
  Phi = compute_Phi(p, k, A_comp, n.ahead + 1) # reduced-form IRFs
  denom = diag(Reduce("+", lapply(Phi, function(i) i %*% Sigma %*% t(i)))) # forecast error variances
  enum = Reduce("+", lapply(Phi, function(i) (i %*% Sigma)^2))             # numerator terms
  tab = sapply(1:nrow(enum), function(j) enum[j, ] / (denom[j] * diag(Sigma)))
  tab = t(apply(tab, 2, function(i) i / sum(i))) # row-normalize so shares sum to one
  return(tab)
}
Continuing the example from above (Edit 1), compare the result of the custom function with that of frequencyConnectedness::genFEVD, which relies on the output of vars::VAR():
> frequencyConnectedness::genFEVD(varres, n.ahead)
V1 V2 V3
[1,] 0.89215734 0.02281892 0.08502374
[2,] 0.72025050 0.19335481 0.08639469
[3,] 0.08328267 0.11769438 0.79902295
> genFEVD_cus(A, Sigma, n.ahead)
V1 V2 V3
[1,] 0.89215734 0.02281892 0.08502374
[2,] 0.72025050 0.19335481 0.08639469
[3,] 0.08328267 0.11769438 0.79902295
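A quick programmatic check of the same comparison (ignoring dimnames) should return TRUE:

all.equal(
  unname(frequencyConnectedness::genFEVD(varres, n.ahead)),
  unname(genFEVD_cus(A, Sigma, n.ahead))
)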