function PAI=CTAsys(Y,X,N,K,T,A_,sqrtht,iV,iVb_prior,PAI,rndStream)
% =========================================================================
% Performs a draw from the conditional posterior of the VAR conditional
% mean coefficients using the triangular algorithm of Carriero, Clark and
% Marcellino (2015), "Large Vector Autoregressions with stochastic
% volatility and flexible priors". The triangularization achieves
% computational gains of order N^2, where N is the number of variables
% in the VAR.
%
% The model is:
%
% Y(t) = Pai(L)Y(t-1) + v(t); Y(t) is Nx1; t=1,...,T, L=1,...,p.
% v(t) = inv(A)*(LAMBDA(t)^0.5)*e(t); e(t) ~ N(0,I);
% _ _
% | 1 0 0 ... 0 |
% | A(2,1) 1 0 ... 0 |
% A = | A(3,1) A(3,2) 1 ... 0 |
% | ... ... ... ... ... |
% |_ A(N,1) ... ... A(N,N-1) 1 _|
%
% Lambda(t)^0.5 = diag[sqrt_h(1,t) , .... , sqrt_h(N,t)];
%
% INPUTS
% Data and pointers:
% Y = (TxN) matrix of data appearing on the LHS of the VAR
% X = (TxKxN) array of data appearing on the RHS of the VAR (one TxK block per equation)
% N = scalar, # of variables in the VAR
% K = scalar, # of regressors (=N*p+1)
% T = scalar, # of observations
% Each block X(:,:,j) needs to be ordered as: [1, y(t-1), y(t-2),..., y(t-p)], as sketched below.
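%   For example, X could be built as follows (a sketch; Yraw denotes a
%   hypothetical (T+p)xN matrix of raw data, with Y=Yraw(p+1:end,:)):
%     Xmat = ones(T,K);
%     for l=1:p, Xmat(:,(l-1)*N+2:l*N+1) = Yraw(p+1-l:end-l,:); end
%     X = repmat(Xmat,1,1,N);   % same regressors in every equation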
%
% Error variance stuff:
% A_ = (NxN) lower triangular matrix A defined above (A itself, not its inverse)
% sqrtht = (TxN) time series of the diagonal elements of the volatility matrix
% For a homoskedastic system with error variance Sigma, one can perform the
% LDL decomposition [L,D]=ldl(Sigma) and set A_=inv(L) (so that inv(A)=L) and
% sqrtht=repmat(sqrt(diag(D)'),T,1), as sketched below.
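%   A minimal sketch of this homoskedastic setup (Sigma is a hypothetical
%   NxN error variance matrix):
%     [L,D]  = ldl(Sigma);
%     A_     = L\eye(N);                    % = inv(L), unit lower triangular
%     sqrtht = repmat(sqrt(diag(D))',T,1);  % constant volatilities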
%
% Priors:
% iV = (NKxNK) prior precision matrix for the VAR coefficients
% iVb_prior = (NKx1) (prior precision)*(prior mean)
% Note 1: iV is the inverse of the prior variance matrix and iVb_prior is
% the product of iV and the prior mean vector; both need to be computed only
% once, before the start of the main MCMC loop (see the sketch below).
% Note 2: in this code, iV is assumed block-diagonal, which implies that the
% prior is independent across equations. This covers most of the priors
% usually considered, including the Minnesota prior. To use a non-block-
% diagonal iV, one needs to modify the code using the recursions illustrated
% in equations (37) and (38) of the paper.
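%   A sketch of the one-off setup (v_prior and b_prior are hypothetical
%   (NKx1) vectors of prior variances and prior means):
%     iV        = diag(1./v_prior);   % block- (here fully) diagonal precision
%     iVb_prior = iV*b_prior;         % precision-weighted prior mean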
%
% Other inputs:
% PAI = (KxN) current draw of the VAR coefficients, updated column by column
% rndStream = RandStream object supplying the standard normal draws
%
% OUTPUT
% One draw from (PAI|A,Lambda,data)
% PAI=[Pai(0), Pai(1), ..., Pai(p)].
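%
% Typical use inside an MCMC sweep (a sketch; the draws of A_ and sqrtht
% come from their own conditional posteriors, not shown here):
%   rndStream = RandStream('mt19937ar','Seed',1);
%   for m=1:M
%       PAI = CTAsys(Y,X,N,K,T,A_,sqrtht,iV,iVb_prior,PAI,rndStream);
%       ... % draw A_, sqrtht, and any other parameters here
%   end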
% =========================================================================
zdraws = randn(rndStream,K,N); % pre-generate all standard normal draws for this sweep
XPAI = NaN(T,N);               % fitted values of each equation, filled in below
Ik = eye(K);
for j=1:N
    % fitted values X*PAI with equation j's own coefficients zeroed out,
    % so that equation j's contribution stays on the LHS
    for jj = 1 : N
        if jj == j
            XPAI(:,jj) = 0;
        else
            XPAI(:,jj) = X(:,:,jj) * PAI(:,jj);
        end
    end
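    % Because A is lower triangular, PAI(:,j) enters the orthogonalized system
    % A*(Y - X*PAI)' = Lambda^0.5*e' only through equations j..N, with loadings
    % A(j:N,j). Dividing by the volatilities standardizes the errors, leaving a
    % regression of Y_j on X_j with unit error variance from which PAI(:,j)
    % can be drawn directly.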
    % build the transformed regression for equation j
    lambda = reshape(sqrtht(:,j:N),[],1);             % stacked volatilities of equations j..N
    Y_j = reshape((Y-XPAI)*A_(j:N,:)',[],1)./lambda;  % orthogonalized, standardized LHS
    X_j = kron(A_(j:N,j),X(:,:,j))./lambda;           % regressors scaled by the loadings A(j:N,j)
    % posterior moments for equation j
    index = K*(j-1)+1 : K*j;
    iV_post = iV(index,index) + X_j'*X_j;
    [iVchol_post, ispd] = chol(iV_post, 'lower');
    if ispd == 0
        Vchol_post = (iVchol_post \ Ik)'; % transposed so that Vchol_post*Vchol_post' = V_post (used for the draw below)
        V_post = Vchol_post * Vchol_post';
    else % iV_post not numerically positive definite; proceed via QR
        warning('switching to QR routine')
        iVchol = chol(iV(index,index))'; % could be computed once for all, outside the MCMC loop
        % set up Kailath's fast array matrix
        qrM = [iVchol, X_j'];
        R = triu(qr(qrM',0))';
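        % R satisfies R*R' = qrM*qrM' = iV(index,index) + X_j'*X_j = iV_post,
        % so its (lower triangular) upper-left KxK block is a valid square
        % root of the posterior precision iV_post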
        iVchol_post = R(1:K,1:K);
        Vchol_post = (iVchol_post \ Ik)'; % upper triangular, which is fine for drawing
        V_post = Vchol_post * Vchol_post';
    end
    % posterior draw: b_post is the posterior mean, V_post the posterior variance
    b_post = V_post*(iVb_prior(index) + X_j'*Y_j);
    PAI(:,j) = b_post + Vchol_post*zdraws(:,j); % ~ N(b_post,V_post) since Vchol_post*Vchol_post' = V_post
end