diff --git a/DESCRIPTION b/DESCRIPTION index 9ec16912..35e058be 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: bvhar Type: Package Title: Bayesian Vector Heterogeneous Autoregressive Modeling -Version: 2.1.1 +Version: 2.1.2 Authors@R: c(person(given = "Young Geun", family = "Kim", diff --git a/NEWS.md b/NEWS.md index 1fcb95ea..b0a7e198 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,3 +1,11 @@ +# bvhar 2.1.2 + +* Fix the MCMC algorithm for the `include_mean = TRUE` case. + +* Fix the predictive distribution update code in `predict()`, `forecast_roll()`, and `forecast_expand()` for `ldltmod` and `svmod` classes. + +* Fix the result-processing code in out-of-sample forecasting (`forecast_roll()` and `forecast_expand()`). + # bvhar 2.1.1 * When using GIG generation in MCMC, it has maximum iteration numbers of while statement. diff --git a/R/RcppExports.R b/R/RcppExports.R index 8d183d4a..c77f62c8 100644 --- a/R/RcppExports.R +++ b/R/RcppExports.R @@ -868,11 +868,10 @@ forecast_bvharldlt <- function(num_chains, month, step, response_mat, HARtrans, #' @param step Integer, Step to forecast #' @param y_test Evaluation time series data period after `y` #' @param nthreads Number of threads -#' @param chunk_size Chunk size for OpenMP static scheduling #' #' @noRd -roll_bvarldlt <- function(y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) { - .Call(`_bvhar_roll_bvarldlt`, y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) +roll_bvarldlt <- function(y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) { + .Call(`_bvhar_roll_bvarldlt`, y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) } #' Out-of-Sample Forecasting of VAR-SV based on Rolling Window @@ -899,11 +898,10 @@ roll_bvarldlt <- function(y, lag, num_chains, num_iter, num_burn, thinning, spar #' @param step Integer, Step to forecast #' @param y_test Evaluation time series data period after `y` #' @param nthreads Number of threads -#' @param chunk_size Chunk size for OpenMP static scheduling #' #' @noRd -roll_bvharldlt <- function(y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) { - .Call(`_bvhar_roll_bvharldlt`, y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) +roll_bvharldlt <- function(y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, 
param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) { + .Call(`_bvhar_roll_bvharldlt`, y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) } #' Out-of-Sample Forecasting of VAR-SV based on Rolling Window @@ -930,11 +928,10 @@ roll_bvharldlt <- function(y, week, month, num_chains, num_iter, num_burn, thinn #' @param step Integer, Step to forecast #' @param y_test Evaluation time series data period after `y` #' @param nthreads Number of threads -#' @param chunk_size Chunk size for OpenMP static scheduling #' #' @noRd -expand_bvarldlt <- function(y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) { - .Call(`_bvhar_expand_bvarldlt`, y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) +expand_bvarldlt <- function(y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) { + .Call(`_bvhar_expand_bvarldlt`, y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) } #' Out-of-Sample Forecasting of VAR-SV based on Rolling Window @@ -961,11 +958,10 @@ expand_bvarldlt <- function(y, lag, num_chains, num_iter, num_burn, thinning, sp #' @param step Integer, Step to forecast #' @param y_test Evaluation time series data period after `y` #' @param nthreads Number of threads -#' @param chunk_size Chunk size for OpenMP static scheduling #' #' @noRd -expand_bvharldlt <- function(y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) { - .Call(`_bvhar_expand_bvharldlt`, y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) +expand_bvharldlt <- function(y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) { + .Call(`_bvhar_expand_bvharldlt`, y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) } #' 
Forecasting BVAR(p) @@ -1278,11 +1274,10 @@ forecast_bvharsv <- function(num_chains, month, step, response_mat, HARtrans, sv #' @param step Integer, Step to forecast #' @param y_test Evaluation time series data period after `y` #' @param nthreads Number of threads -#' @param chunk_size Chunk size for OpenMP static scheduling #' #' @noRd -roll_bvarsv <- function(y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) { - .Call(`_bvhar_roll_bvarsv`, y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) +roll_bvarsv <- function(y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) { + .Call(`_bvhar_roll_bvarsv`, y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) } #' Out-of-Sample Forecasting of VAR-SV based on Rolling Window @@ -1309,11 +1304,10 @@ roll_bvarsv <- function(y, lag, num_chains, num_iter, num_burn, thinning, sv, sp #' @param step Integer, Step to forecast #' @param y_test Evaluation time series data period after `y` #' @param nthreads Number of threads -#' @param chunk_size Chunk size for OpenMP static scheduling #' #' @noRd -roll_bvharsv <- function(y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) { - .Call(`_bvhar_roll_bvharsv`, y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) +roll_bvharsv <- function(y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) { + .Call(`_bvhar_roll_bvharsv`, y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) } #' Out-of-Sample Forecasting of VAR-SV based on Rolling Window @@ -1340,11 +1334,10 @@ roll_bvharsv <- function(y, week, month, num_chains, num_iter, num_burn, thinnin #' @param step Integer, Step to forecast #' @param y_test Evaluation time series data period after `y` #' @param nthreads Number of threads -#' @param chunk_size Chunk size for OpenMP static scheduling #' #' @noRd -expand_bvarsv <- function(y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, 
fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) { - .Call(`_bvhar_expand_bvarsv`, y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) +expand_bvarsv <- function(y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) { + .Call(`_bvhar_expand_bvarsv`, y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) } #' Out-of-Sample Forecasting of VAR-SV based on Rolling Window @@ -1371,11 +1364,10 @@ expand_bvarsv <- function(y, lag, num_chains, num_iter, num_burn, thinning, sv, #' @param step Integer, Step to forecast #' @param y_test Evaluation time series data period after `y` #' @param nthreads Number of threads -#' @param chunk_size Chunk size for OpenMP static scheduling #' #' @noRd -expand_bvharsv <- function(y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) { - .Call(`_bvhar_expand_bvharsv`, y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size) +expand_bvharsv <- function(y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) { + .Call(`_bvhar_expand_bvharsv`, y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads) } #' VAR(1) Representation Given VAR Coefficient Matrix diff --git a/R/summary-forecast.R b/R/summary-forecast.R index 8871902f..ba7f0a23 100644 --- a/R/summary-forecast.R +++ b/R/summary-forecast.R @@ -220,11 +220,12 @@ forecast_roll.normaliw <- function(object, n_ahead, y_test, num_thread = 1, use_ } #' @rdname forecast_roll +#' @param level Specify alpha of confidence interval level 100(1 - alpha) percentage. By default, .05. #' @param sparse `r lifecycle::badge("experimental")` Apply restriction. By default, `FALSE`. #' @param lpl `r lifecycle::badge("experimental")` Compute log-predictive likelihood (LPL). By default, `FALSE`. #' @param use_fit `r lifecycle::badge("experimental")` Use `object` result for the first window. By default, `TRUE`. #' @export -forecast_roll.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, sparse = FALSE, lpl = FALSE, use_fit = TRUE, ...) 
{ +forecast_roll.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, level = .05, sparse = FALSE, lpl = FALSE, use_fit = TRUE, ...) { y <- object$y if (!is.null(colnames(y))) { name_var <- colnames(y) @@ -256,21 +257,12 @@ forecast_roll.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spars # if (num_thread > num_chains && num_chains != 1) { # warning(sprintf("'num_thread' > MCMC chain will use not every thread. Specify as 'num_thread' <= 'object$chain' = %d.", num_chains)) # } - if (num_horizon * num_chains %/% num_thread == 0) { - warning(sprintf("OpenMP cannot divide the iterations as integer. Use divisor of ('nrow(y_test) - n_ahead + 1') * 'num_thread' <= 'object$chain' = %d", num_horizon * num_chains)) - } - chunk_size <- num_horizon * num_chains %/% num_thread # default setting of OpenMP schedule(static) - if (chunk_size == 0) { - chunk_size <- 1 - } - # if (num_horizon > num_chains && chunk_size > num_chains) { - # chunk_size <- min( - # num_chains, - # (num_horizon %/% num_thread) * num_chains - # ) - # if (chunk_size == 0) { - # chunk_size <- 1 - # } + # if (num_horizon * num_chains %/% num_thread == 0) { + # warning(sprintf("OpenMP cannot divide the iterations as integer. Use divisor of ('nrow(y_test) - n_ahead + 1') * 'num_thread' <= 'object$chain' = %d", num_horizon * num_chains)) + # } + # chunk_size <- num_horizon * num_chains %/% num_thread # default setting of OpenMP schedule(static) + # if (chunk_size == 0) { + # chunk_size <- 1 # } ci_lev <- 0 if (is.numeric(sparse)) { @@ -326,7 +318,7 @@ forecast_roll.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spars lpl, sample.int(.Machine$integer.max, size = num_chains * num_horizon) %>% matrix(ncol = num_chains), sample.int(.Machine$integer.max, size = num_chains), - num_thread, chunk_size + num_thread ) }, "bvharldlt" = { @@ -371,12 +363,12 @@ forecast_roll.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spars lpl, sample.int(.Machine$integer.max, size = num_chains * num_horizon) %>% matrix(ncol = num_chains), sample.int(.Machine$integer.max, size = num_chains), - num_thread, chunk_size + num_thread ) } ) - # num_draw <- nrow(object$a_record) # concatenate multiple chains - num_draw <- nrow(object$param) / num_chains + num_draw <- nrow(object$param) # concatenate multiple chains + # num_draw <- nrow(object$param) / num_chains if (lpl) { lpl_val <- res_mat$lpl res_mat$lpl <- NULL @@ -404,7 +396,7 @@ forecast_roll.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spars lapply(function(res) { unlist(res) %>% array(dim = c(1, object$m, num_draw)) %>% - apply(c(1, 2), quantile, probs = .05 / 2) + apply(c(1, 2), quantile, probs = level / 2) }) %>% do.call(rbind, .) colnames(lower_quantile) <- name_var @@ -413,7 +405,7 @@ forecast_roll.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spars lapply(function(res) { unlist(res) %>% array(dim = c(1, object$m, num_draw)) %>% - apply(c(1, 2), quantile, probs = 1 - .05 / 2) + apply(c(1, 2), quantile, probs = 1 - level / 2) }) %>% do.call(rbind, .) colnames(upper_quantile) <- name_var @@ -436,12 +428,13 @@ forecast_roll.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spars } #' @rdname forecast_roll +#' @param level Specify alpha of confidence interval level 100(1 - alpha) percentage. By default, .05. #' @param use_sv Use SV term #' @param sparse `r lifecycle::badge("experimental")` Apply restriction. By default, `FALSE`. #' @param lpl `r lifecycle::badge("experimental")` Compute log-predictive likelihood (LPL). 
By default, `FALSE`. #' @param use_fit `r lifecycle::badge("experimental")` Use `object` result for the first window. By default, `TRUE`. #' @export -forecast_roll.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_sv = TRUE, sparse = FALSE, lpl = FALSE, use_fit = TRUE, ...) { +forecast_roll.svmod <- function(object, n_ahead, y_test, num_thread = 1, level = .05, use_sv = TRUE, sparse = FALSE, lpl = FALSE, use_fit = TRUE, ...) { y <- object$y if (!is.null(colnames(y))) { name_var <- colnames(y) @@ -473,21 +466,12 @@ forecast_roll.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_sv # if (num_thread > num_chains && num_chains != 1) { # warning(sprintf("'num_thread' > MCMC chain will use not every thread. Specify as 'num_thread' <= 'object$chain' = %d.", num_chains)) # } - if (num_horizon * num_chains %/% num_thread == 0) { - warning(sprintf("OpenMP cannot divide the iterations as integer. Use divisor of ('nrow(y_test) - n_ahead + 1') * 'num_thread' <= 'object$chain' = %d", num_horizon * num_chains)) - } - chunk_size <- num_horizon * num_chains %/% num_thread # default setting of OpenMP schedule(static) - if (chunk_size == 0) { - chunk_size <- 1 - } - # if (num_horizon > num_chains && chunk_size > num_chains) { - # chunk_size <- min( - # num_chains, - # (num_horizon %/% num_thread) * num_chains - # ) - # if (chunk_size == 0) { - # chunk_size <- 1 - # } + # if (num_horizon * num_chains %/% num_thread == 0) { + # warning(sprintf("OpenMP cannot divide the iterations as integer. Use divisor of ('nrow(y_test) - n_ahead + 1') * 'num_thread' <= 'object$chain' = %d", num_horizon * num_chains)) + # } + # chunk_size <- num_horizon * num_chains %/% num_thread # default setting of OpenMP schedule(static) + # if (chunk_size == 0) { + # chunk_size <- 1 # } ci_lev <- 0 if (is.numeric(sparse)) { @@ -544,7 +528,7 @@ forecast_roll.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_sv lpl, sample.int(.Machine$integer.max, size = num_chains * num_horizon) %>% matrix(ncol = num_chains), sample.int(.Machine$integer.max, size = num_chains), - num_thread, chunk_size + num_thread ) }, "bvharsv" = { @@ -589,12 +573,12 @@ forecast_roll.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_sv lpl, sample.int(.Machine$integer.max, size = num_chains * num_horizon) %>% matrix(ncol = num_chains), sample.int(.Machine$integer.max, size = num_chains), - num_thread, chunk_size + num_thread ) } ) - # num_draw <- nrow(object$a_record) # concatenate multiple chains - num_draw <- nrow(object$param) / num_chains + num_draw <- nrow(object$param) # concatenate multiple chains + # num_draw <- nrow(object$param) / num_chains if (lpl) { lpl_val <- res_mat$lpl res_mat$lpl <- NULL @@ -622,7 +606,7 @@ forecast_roll.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_sv lapply(function(res) { unlist(res) %>% array(dim = c(1, object$m, num_draw)) %>% - apply(c(1, 2), quantile, probs = .05 / 2) + apply(c(1, 2), quantile, probs = level / 2) }) %>% do.call(rbind, .) colnames(lower_quantile) <- name_var @@ -631,7 +615,7 @@ forecast_roll.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_sv lapply(function(res) { unlist(res) %>% array(dim = c(1, object$m, num_draw)) %>% - apply(c(1, 2), quantile, probs = 1 - .05 / 2) + apply(c(1, 2), quantile, probs = 1 - level / 2) }) %>% do.call(rbind, .) 
colnames(upper_quantile) <- name_var @@ -816,11 +800,12 @@ forecast_expand.normaliw <- function(object, n_ahead, y_test, num_thread = 1, us } #' @rdname forecast_expand +#' @param level Specify alpha of confidence interval level 100(1 - alpha) percentage. By default, .05. #' @param sparse `r lifecycle::badge("experimental")` Apply restriction. By default, `FALSE`. #' @param lpl `r lifecycle::badge("experimental")` Compute log-predictive likelihood (LPL). By default, `FALSE`. #' @param use_fit `r lifecycle::badge("experimental")` Use `object` result for the first window. By default, `TRUE`. #' @export -forecast_expand.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, sparse = FALSE, lpl = FALSE, use_fit = TRUE, ...) { +forecast_expand.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, level = .05, sparse = FALSE, lpl = FALSE, use_fit = TRUE, ...) { y <- object$y if (!is.null(colnames(y))) { name_var <- colnames(y) @@ -852,22 +837,13 @@ forecast_expand.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spa # if (num_thread > num_chains && num_chains != 1) { # warning(sprintf("'num_thread' > MCMC chain will use not every thread. Specify as 'num_thread' <= 'object$chain' = %d.", num_chains)) # } - if (num_horizon * num_chains %/% num_thread == 0) { - warning(sprintf("OpenMP cannot divide the iterations as integer. Use divisor of ('nrow(y_test) - n_ahead + 1') * 'num_thread' <= 'object$chain' = %d", num_horizon * num_chains)) - } + # if (num_horizon * num_chains %/% num_thread == 0) { + # warning(sprintf("OpenMP cannot divide the iterations as integer. Use divisor of ('nrow(y_test) - n_ahead + 1') * 'num_thread' <= 'object$chain' = %d", num_horizon * num_chains)) + # } # chunk_size <- num_horizon * num_chains %/% num_thread # default setting of OpenMP schedule(static) - chunk_size <- num_chains # use inner loop for chain as a chunk in dynamic schedule - if (chunk_size == 0) { - chunk_size <- 1 - } - # if (num_horizon > num_chains && chunk_size > num_chains) { - # chunk_size <- min( - # num_chains, - # (num_horizon %/% num_thread) * num_chains - # ) - # if (chunk_size == 0) { - # chunk_size <- 1 - # } + # chunk_size <- num_chains # use inner loop for chain as a chunk in dynamic schedule + # if (chunk_size == 0) { + # chunk_size <- 1 # } ci_lev <- 0 if (is.numeric(sparse)) { @@ -924,7 +900,7 @@ forecast_expand.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spa lpl, sample.int(.Machine$integer.max, size = num_chains * num_horizon) %>% matrix(ncol = num_chains), sample.int(.Machine$integer.max, size = num_chains), - num_thread, chunk_size + num_thread ) }, "bvharldlt" = { @@ -969,12 +945,12 @@ forecast_expand.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spa lpl, sample.int(.Machine$integer.max, size = num_chains * num_horizon) %>% matrix(ncol = num_chains), sample.int(.Machine$integer.max, size = num_chains), - num_thread, chunk_size + num_thread ) } ) - # num_draw <- nrow(object$a_record) # concatenate multiple chains - num_draw <- nrow(object$param) / num_chains + num_draw <- nrow(object$param) # concatenate multiple chains + # num_draw <- nrow(object$param) / num_chains if (lpl) { lpl_val <- res_mat$lpl res_mat$lpl <- NULL @@ -1011,7 +987,7 @@ forecast_expand.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spa lapply(function(res) { unlist(res) %>% array(dim = c(1, object$m, num_draw)) %>% - apply(c(1, 2), quantile, probs = .05 / 2) + apply(c(1, 2), quantile, probs = level / 2) }) %>% do.call(rbind, .) 
colnames(lower_quantile) <- name_var @@ -1020,7 +996,7 @@ forecast_expand.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spa lapply(function(res) { unlist(res) %>% array(dim = c(1, object$m, num_draw)) %>% - apply(c(1, 2), quantile, probs = 1 - .05 / 2) + apply(c(1, 2), quantile, probs = 1 - level / 2) }) %>% do.call(rbind, .) colnames(upper_quantile) <- name_var @@ -1043,12 +1019,13 @@ forecast_expand.ldltmod <- function(object, n_ahead, y_test, num_thread = 1, spa } #' @rdname forecast_expand +#' @param level Specify alpha of confidence interval level 100(1 - alpha) percentage. By default, .05. #' @param use_sv Use SV term #' @param sparse `r lifecycle::badge("experimental")` Apply restriction. By default, `FALSE`. #' @param lpl `r lifecycle::badge("experimental")` Compute log-predictive likelihood (LPL). By default, `FALSE`. #' @param use_fit `r lifecycle::badge("experimental")` Use `object` result for the first window. By default, `TRUE`. #' @export -forecast_expand.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_sv = TRUE, sparse = FALSE, lpl = FALSE, use_fit = TRUE, ...) { +forecast_expand.svmod <- function(object, n_ahead, y_test, num_thread = 1, level = .05, use_sv = TRUE, sparse = FALSE, lpl = FALSE, use_fit = TRUE, ...) { y <- object$y if (!is.null(colnames(y))) { name_var <- colnames(y) @@ -1080,22 +1057,13 @@ forecast_expand.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_s # if (num_thread > num_chains && num_chains != 1) { # warning(sprintf("'num_thread' > MCMC chain will use not every thread. Specify as 'num_thread' <= 'object$chain' = %d.", num_chains)) # } - if (num_horizon * num_chains %/% num_thread == 0) { - warning(sprintf("OpenMP cannot divide the iterations as integer. Use divisor of ('nrow(y_test) - n_ahead + 1') * 'num_thread' <= 'object$chain' = %d", num_horizon * num_chains)) - } + # if (num_horizon * num_chains %/% num_thread == 0) { + # warning(sprintf("OpenMP cannot divide the iterations as integer. 
Use divisor of ('nrow(y_test) - n_ahead + 1') * 'num_thread' <= 'object$chain' = %d", num_horizon * num_chains)) + # } # chunk_size <- num_horizon * num_chains %/% num_thread # default setting of OpenMP schedule(static) - chunk_size <- num_chains # use inner loop for chain as a chunk in dynamic schedule - if (chunk_size == 0) { - chunk_size <- 1 - } - # if (num_horizon > num_chains && chunk_size > num_chains) { - # chunk_size <- min( - # num_chains, - # (num_horizon %/% num_thread) * num_chains - # ) - # if (chunk_size == 0) { - # chunk_size <- 1 - # } + # chunk_size <- num_chains # use inner loop for chain as a chunk in dynamic schedule + # if (chunk_size == 0) { + # chunk_size <- 1 # } ci_lev <- 0 if (is.numeric(sparse)) { @@ -1152,7 +1120,7 @@ forecast_expand.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_s lpl, sample.int(.Machine$integer.max, size = num_chains * num_horizon) %>% matrix(ncol = num_chains), sample.int(.Machine$integer.max, size = num_chains), - num_thread, chunk_size + num_thread ) }, "bvharsv" = { @@ -1197,12 +1165,12 @@ forecast_expand.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_s lpl, sample.int(.Machine$integer.max, size = num_chains * num_horizon) %>% matrix(ncol = num_chains), sample.int(.Machine$integer.max, size = num_chains), - num_thread, chunk_size + num_thread ) } ) - # num_draw <- nrow(object$a_record) # concatenate multiple chains - num_draw <- nrow(object$param) / num_chains + num_draw <- nrow(object$param) # concatenate multiple chains + # num_draw <- nrow(object$param) / num_chains if (lpl) { lpl_val <- res_mat$lpl res_mat$lpl <- NULL @@ -1239,7 +1207,7 @@ forecast_expand.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_s lapply(function(res) { unlist(res) %>% array(dim = c(1, object$m, num_draw)) %>% - apply(c(1, 2), quantile, probs = .05 / 2) + apply(c(1, 2), quantile, probs = level / 2) }) %>% do.call(rbind, .) colnames(lower_quantile) <- name_var @@ -1248,7 +1216,7 @@ forecast_expand.svmod <- function(object, n_ahead, y_test, num_thread = 1, use_s lapply(function(res) { unlist(res) %>% array(dim = c(1, object$m, num_draw)) %>% - apply(c(1, 2), quantile, probs = 1 - .05 / 2) + apply(c(1, 2), quantile, probs = 1 - level / 2) }) %>% do.call(rbind, .) 
colnames(upper_quantile) <- name_var diff --git a/R/zzz.R index 9f60e3de..644d5f98 100644 --- a/R/zzz.R +++ b/R/zzz.R @@ -1,3 +1,12 @@ +.onLoad <- function(libname, pkgname) { + Rcpp::registerPlugin( + "bvhar", + function() { + list(env = list(PKG_CPPFLAGS = "-DUSE_RCPP")) + } + ) +} + .onAttach <- function(libname, pkgname) { if (!interactive()) { return() diff --git a/_pkgdown.yml index ff2bd809..dd867fed 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -207,8 +207,8 @@ news: href: https://github.com/ygeunkim/bvhar/releases/tag/v0.12.0 # - text: "Version 1.0.0" # href: https://github.com/ygeunkim/bvhar/releases/tag/v1.0.0 - - text: "Version 1.0.1" - href: https://github.com/ygeunkim/bvhar/releases/tag/v1.0.1 + # - text: "Version 1.0.1" + # href: https://github.com/ygeunkim/bvhar/releases/tag/v1.0.1 - text: "Version 1.0.2" href: https://github.com/ygeunkim/bvhar/releases/tag/v1.0.2 - text: "Version 1.1.0" @@ -221,5 +221,5 @@ news: href: https://github.com/ygeunkim/bvhar/releases/tag/v2.0.1 - text: "Version 2.1.0" href: https://github.com/ygeunkim/bvhar/releases/tag/v2.1.0 - - text: "Version 2.1.1" - href: https://github.com/ygeunkim/bvhar/releases/tag/v2.1.1 + - text: "Version 2.1.2" + href: https://github.com/ygeunkim/bvhar/releases/tag/v2.1.2 diff --git a/cran-comments.md index 5dc27022..6db76d3b 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -1,9 +1,10 @@ -## Minor version update +## Patch version update +This is a quick fix for a fatal algorithm fault. In this version, we -- Fix some numerical process -- Fix a C++ macro issue +- Fixed the MCMC algorithm when a constant term is included. +- Fixed the forecasting algorithm for MCMC objects. ## Test environments @@ -18,7 +19,10 @@ In this version, we 0 errors | 0 warnings | 1 note -* NOTE in local machine: HTML validation NOTE on local environment (aarch64-apple-darwin20) check. This note appears to be specific to my local machine and had no problem in previous CRAN checks. +* NOTE for CRAN incoming feasibility (Days since last update: 6): +The algorithms in the current version have some serious issues, +so an update is needed as soon as possible. +* NOTE only on the local machine: HTML validation NOTE on the local environment check. This note appears to be specific to my local machine and did not cause any problems in previous CRAN checks. ## revdepcheck results diff --git a/inst/include/commondefs.h index 6ce9da31..0cc11dac 100644 --- a/inst/include/commondefs.h +++ b/inst/include/commondefs.h @@ -2,8 +2,6 @@ #define COMMONDEFS_H #ifdef USE_RCPP - #define USE_RCPP - #include #define STOP(...) Rcpp::stop(__VA_ARGS__) diff --git a/inst/include/mcmcreg.h index 8b164f3d..ddaf30d6 100644 --- a/inst/include/mcmcreg.h +++ b/inst/include/mcmcreg.h @@ -443,20 +443,31 @@ class McmcReg { Eigen::MatrixXd sqrt_sv_j = sqrt_sv.rightCols(dim - j); // use h_jt to h_kt for t = 1, .. 
n => (k - j + 1) x k Eigen::MatrixXd design_coef = kronecker_eigen(chol_lower_j.col(j), x).array().colwise() / sqrt_sv_j.reshaped().array(); // L_(j:k, j) otimes X0 scaled by D_(1:n, j:k): n(k - j + 1) x kp // Eigen::VectorXd response_j = (((y - x * coef_mat) * chol_lower_j.transpose()).array() / sqrt_sv_j.array()).reshaped(); // Hadamard product between: (Y - X0 A(-j))L_(j:k)^T and D_(1:n, j:k) - draw_coef( - coef_mat.col(j), - design_coef, - (((y - x * coef_mat) * chol_lower_j.transpose()).array() / sqrt_sv_j.array()).reshaped(), - prior_alpha_mean.segment(dim_design * j, dim_design), // Prior mean vector of j-th column of A - prior_alpha_prec.segment(dim_design * j, dim_design), // Prior precision of j-th column of A - rng - ); + if (include_mean) { + Eigen::VectorXd prior_mean_j(dim_design); + Eigen::VectorXd prior_prec_j(dim_design); + prior_mean_j << prior_alpha_mean.segment(j * num_alpha / dim, num_alpha / dim), prior_alpha_mean.tail(dim)[j]; + prior_prec_j << prior_alpha_prec.segment(j * num_alpha / dim, num_alpha / dim), prior_alpha_prec.tail(dim)[j]; + draw_coef( + coef_mat.col(j), design_coef, + (((y - x * coef_mat) * chol_lower_j.transpose()).array() / sqrt_sv_j.array()).reshaped(), + prior_mean_j, prior_prec_j, rng + ); + coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped(); + coef_vec.tail(dim) = coef_mat.bottomRows(1).transpose(); + } else { + draw_coef( + coef_mat.col(j), + design_coef, + (((y - x * coef_mat) * chol_lower_j.transpose()).array() / sqrt_sv_j.array()).reshaped(), + prior_alpha_mean.segment(dim_design * j, dim_design), // Prior mean vector of j-th column of A + prior_alpha_prec.segment(dim_design * j, dim_design), // Prior precision of j-th column of A + rng + ); + coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped(); + } draw_savs(sparse_coef.col(j), coef_mat.col(j).head(num_alpha / dim), design_coef); } - coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped(); - if (include_mean) { - coef_vec.tail(dim) = coef_mat.bottomRows(1).transpose(); - } } void updateDiag() { // ortho_latent = latent_innov * chol_lower.transpose(); // L eps_t <=> Z0 U @@ -515,26 +526,6 @@ class MinnReg : public McmcReg { updateDiag(); updateRecords(); } -// #ifdef USE_RCPP -// Rcpp::List returnRecords(int num_burn, int thin) const override { -// Rcpp::List res = Rcpp::List::create( -// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), -// Rcpp::Named("a_record") = reg_record.contem_coef_record, -// Rcpp::Named("d_record") = reg_record.fac_record, -// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, -// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record -// ); -// if (include_mean) { -// res["c_record"] = reg_record.coef_record.rightCols(dim); -// } -// for (auto& record : res) { -// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); -// } -// return res; -// } -// #else -// py::dict returnRecords(int num_burn, int thin) const override; -// #endif LIST returnRecords(int num_burn, int thin) const override { LIST res = CREATE_LIST( NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), @@ -611,26 +602,6 @@ class HierminnReg : public McmcReg { updateDiag(); updateRecords(); } -// #ifdef USE_RCPP -// Rcpp::List returnRecords(int num_burn, int thin) const override { -// Rcpp::List res = Rcpp::List::create( -// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), -// Rcpp::Named("a_record") = reg_record.contem_coef_record, -// 
Rcpp::Named("d_record") = reg_record.fac_record, -// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, -// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record -// ); -// if (include_mean) { -// res["c_record"] = reg_record.coef_record.rightCols(dim); -// } -// for (auto& record : res) { -// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); -// } -// return res; -// } -// #else -// py::dict returnRecords(int num_burn, int thin) const override; -// #endif LIST returnRecords(int num_burn, int thin) const override { LIST res = CREATE_LIST( NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), @@ -746,27 +717,6 @@ class SsvsReg : public McmcReg { updateDiag(); updateRecords(); } -// #ifdef USE_RCPP -// Rcpp::List returnRecords(int num_burn, int thin) const override { -// Rcpp::List res = Rcpp::List::create( -// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), -// Rcpp::Named("a_record") = reg_record.contem_coef_record, -// Rcpp::Named("d_record") = reg_record.fac_record, -// Rcpp::Named("gamma_record") = ssvs_record.coef_dummy_record, -// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, -// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record -// ); -// if (include_mean) { -// res["c_record"] = reg_record.coef_record.rightCols(dim); -// } -// for (auto& record : res) { -// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); -// } -// return res; -// } -// #else -// py::dict returnRecords(int num_burn, int thin) const override; -// #endif LIST returnRecords(int num_burn, int thin) const override { LIST res = CREATE_LIST( NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), @@ -886,34 +836,6 @@ class HorseshoeReg : public McmcReg { updateDiag(); updateRecords(); } -// #ifdef USE_RCPP -// Rcpp::List returnRecords(int num_burn, int thin) const override { -// Rcpp::List res = Rcpp::List::create( -// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), -// Rcpp::Named("a_record") = reg_record.contem_coef_record, -// Rcpp::Named("d_record") = reg_record.fac_record, -// Rcpp::Named("lambda_record") = hs_record.local_record, -// Rcpp::Named("eta_record") = hs_record.group_record, -// Rcpp::Named("tau_record") = hs_record.global_record, -// Rcpp::Named("kappa_record") = hs_record.shrink_record, -// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, -// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record -// ); -// if (include_mean) { -// res["c_record"] = reg_record.coef_record.rightCols(dim); -// } -// for (auto& record : res) { -// if (Rcpp::is(record)) { -// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); -// } else { -// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); -// } -// } -// return res; -// } -// #else -// py::dict returnRecords(int num_burn, int thin) const override; -// #endif LIST returnRecords(int num_burn, int thin) const override { LIST res = CREATE_LIST( NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), @@ -1047,33 +969,6 @@ class NgReg : public McmcReg { updateDiag(); updateRecords(); } -// #ifdef USE_RCPP -// Rcpp::List returnRecords(int num_burn, int thin) const override { -// Rcpp::List res = Rcpp::List::create( -// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), -// Rcpp::Named("a_record") = reg_record.contem_coef_record, -// Rcpp::Named("d_record") = reg_record.fac_record, -// Rcpp::Named("lambda_record") = 
ng_record.local_record, -// Rcpp::Named("eta_record") = ng_record.group_record, -// Rcpp::Named("tau_record") = ng_record.global_record, -// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, -// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record -// ); -// if (include_mean) { -// res["c_record"] = reg_record.coef_record.rightCols(dim); -// } -// for (auto& record : res) { -// if (Rcpp::is(record)) { -// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); -// } else { -// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); -// } -// } -// return res; -// } -// #else -// py::dict returnRecords(int num_burn, int thin) const override; -// #endif LIST returnRecords(int num_burn, int thin) const override { LIST res = CREATE_LIST( NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), @@ -1207,32 +1102,6 @@ class DlReg : public McmcReg { updateDiag(); updateRecords(); } -// #ifdef USE_RCPP -// Rcpp::List returnRecords(int num_burn, int thin) const override { -// Rcpp::List res = Rcpp::List::create( -// Rcpp::Named("alpha_record") = reg_record.coef_record.leftCols(num_alpha), -// Rcpp::Named("a_record") = reg_record.contem_coef_record, -// Rcpp::Named("d_record") = reg_record.fac_record, -// Rcpp::Named("lambda_record") = dl_record.local_record, -// Rcpp::Named("tau_record") = dl_record.global_record, -// Rcpp::Named("alpha_sparse_record") = sparse_record.coef_record, -// Rcpp::Named("a_sparse_record") = sparse_record.contem_coef_record -// ); -// if (include_mean) { -// res["c_record"] = reg_record.coef_record.rightCols(dim); -// } -// for (auto& record : res) { -// if (Rcpp::is(record)) { -// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); -// } else { -// record = thin_record(Rcpp::as(record), num_iter, num_burn, thin); -// } -// } -// return res; -// } -// #else -// py::dict returnRecords(int num_burn, int thin) const override; -// #endif LIST returnRecords(int num_burn, int thin) const override { LIST res = CREATE_LIST( NAMED("alpha_record") = reg_record.coef_record.leftCols(num_alpha), diff --git a/inst/include/mcmcsv.h b/inst/include/mcmcsv.h index e78a0201..5f70a86b 100644 --- a/inst/include/mcmcsv.h +++ b/inst/include/mcmcsv.h @@ -515,20 +515,31 @@ class McmcSv { Eigen::MatrixXd sqrt_sv_j = sqrt_sv.rightCols(dim - j); // use h_jt to h_kt for t = 1, .. 
n => (k - j + 1) x k Eigen::MatrixXd design_coef = kronecker_eigen(chol_lower_j.col(j), x).array().colwise() * sqrt_sv_j.reshaped().array(); // L_(j:k, j) otimes X0 scaled by D_(1:n, j:k): n(k - j + 1) x kp // Eigen::VectorXd response_j = (((y - x * coef_mat) * chol_lower_j.transpose()).array() * sqrt_sv_j.array()).reshaped(); // Hadamard product between: (Y - X0 A(-j))L_(j:k)^T and D_(1:n, j:k) - draw_coef( - coef_mat.col(j), - design_coef, - (((y - x * coef_mat) * chol_lower_j.transpose()).array() * sqrt_sv_j.array()).reshaped(), - prior_alpha_mean.segment(dim_design * j, dim_design), // Prior mean vector of j-th column of A - prior_alpha_prec.segment(dim_design * j, dim_design), // Prior precision of j-th column of A - rng - ); + if (include_mean) { + Eigen::VectorXd prior_mean_j(dim_design); + Eigen::VectorXd prior_prec_j(dim_design); + prior_mean_j << prior_alpha_mean.segment(j * num_alpha / dim, num_alpha / dim), prior_alpha_mean.tail(dim)[j]; + prior_prec_j << prior_alpha_prec.segment(j * num_alpha / dim, num_alpha / dim), prior_alpha_prec.tail(dim)[j]; + draw_coef( + coef_mat.col(j), design_coef, + (((y - x * coef_mat) * chol_lower_j.transpose()).array() * sqrt_sv_j.array()).reshaped(), + prior_mean_j, prior_prec_j, rng + ); + coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped(); + coef_vec.tail(dim) = coef_mat.bottomRows(1).transpose(); + } else { + draw_coef( + coef_mat.col(j), + design_coef, + (((y - x * coef_mat) * chol_lower_j.transpose()).array() * sqrt_sv_j.array()).reshaped(), + prior_alpha_mean.segment(dim_design * j, dim_design), // Prior mean vector of j-th column of A + prior_alpha_prec.segment(dim_design * j, dim_design), // Prior precision of j-th column of A + rng + ); + coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped(); + } draw_savs(sparse_coef.col(j), coef_mat.col(j).head(num_alpha / dim), design_coef); } - coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped(); - if (include_mean) { - coef_vec.tail(dim) = coef_mat.bottomRows(1).transpose(); - } } void updateState() { ortho_latent = latent_innov * chol_lower.transpose(); // L eps_t <=> Z0 U diff --git a/inst/include/regforecaster.h b/inst/include/regforecaster.h index cc9de66b..49afb7f8 100644 --- a/inst/include/regforecaster.h +++ b/inst/include/regforecaster.h @@ -24,7 +24,7 @@ class RegForecaster { last_pvec(Eigen::VectorXd::Zero(dim_design)), sv_update(Eigen::VectorXd::Zero(dim)), post_mean(Eigen::VectorXd::Zero(dim)), - predictive_distn(Eigen::MatrixXd::Zero(step, num_sim * dim)), + // predictive_distn(Eigen::MatrixXd::Zero(step, num_sim * dim)), coef_mat(Eigen::MatrixXd::Zero(num_coef / dim, dim)), contem_mat(Eigen::MatrixXd::Identity(dim, dim)), standard_normal(Eigen::VectorXd::Zero(dim)), @@ -46,6 +46,8 @@ class RegForecaster { } Eigen::MatrixXd forecastDensity() { std::lock_guard lock(mtx); + Eigen::VectorXd mean_draw = post_mean.replicate(num_sim, 1); // cbind(sims) + Eigen::MatrixXd predictive_distn(step, num_sim * dim); // rbind(step), cbind(sims) for (int h = 0; h < step; h++) { last_pvec.segment(dim, (var_lag - 1) * dim) = tmp_vec; for (int i = 0; i < num_sim; i++) { @@ -53,18 +55,23 @@ class RegForecaster { if (include_mean) { coef_mat.bottomRows(1) = reg_record.coef_record.row(i).tail(dim); } - last_pvec.head(dim) = post_mean; + // last_pvec.head(dim) = post_mean; + last_pvec.head(dim) = mean_draw.segment(i * dim, dim); // hat(y_{T + h - 1}^{(i)}) computeMean(i); updateVariance(i); post_mean += 
contem_mat.triangularView().solve(standard_normal); // N(post_mean, L^-1 D L) - predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); + // predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); + mean_draw.segment(i * dim, dim) = post_mean; // hat(Y_{T + h}^{(i)}) } + predictive_distn.row(h) = mean_draw; tmp_vec = last_pvec.head((var_lag - 1) * dim); } return predictive_distn; } Eigen::MatrixXd forecastDensity(const Eigen::VectorXd& valid_vec) { std::lock_guard lock(mtx); + Eigen::VectorXd mean_draw = post_mean.replicate(num_sim, 1); // cbind(sims) + Eigen::MatrixXd predictive_distn(step, num_sim * dim); // rbind(step), cbind(sims) for (int h = 0; h < step; h++) { last_pvec.segment(dim, (var_lag - 1) * dim) = tmp_vec; for (int i = 0; i < num_sim; i++) { @@ -72,13 +79,16 @@ class RegForecaster { if (include_mean) { coef_mat.bottomRows(1) = reg_record.coef_record.row(i).tail(dim); } - last_pvec.head(dim) = post_mean; + // last_pvec.head(dim) = post_mean; + last_pvec.head(dim) = mean_draw.segment(i * dim, dim); computeMean(i); // can have overflow issue due to stability of each coef_record updateVariance(i); - post_mean += contem_mat.triangularView().solve(standard_normal); // N(post_mean, L^-1 D L) - predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); + post_mean += contem_mat.triangularView().solve(standard_normal); // N(post_mean, L^-1 D (L^T)^-1) + // predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); + mean_draw.segment(i * dim, dim) = post_mean; lpl[h] += sv_update.sum() / 2 - dim * log(2 * M_PI) / 2 - (sv_update.cwiseSqrt().cwiseInverse().array() * (contem_mat * (post_mean - valid_vec)).array()).matrix().squaredNorm() / 2; } + predictive_distn.row(h) = mean_draw; lpl[h] /= num_sim; tmp_vec = last_pvec.head((var_lag - 1) * dim); } @@ -107,7 +117,7 @@ class RegForecaster { Eigen::VectorXd last_pvec; // [ y_(T + h - 1)^T, y_(T + h - 2)^T, ..., y_(T + h - p)^T, 1 ] (1 when constant term) Eigen::VectorXd sv_update; // d_1, ..., d_m Eigen::VectorXd post_mean; // posterior mean and y_(T + h - 1) - Eigen::MatrixXd predictive_distn; // rbind(step), cbind(sims) + // Eigen::MatrixXd predictive_distn; // rbind(step), cbind(sims) Eigen::MatrixXd coef_mat; // include constant term when include_mean = true Eigen::MatrixXd contem_mat; // L Eigen::VectorXd standard_normal; // Z ~ N(0, I) diff --git a/inst/include/svforecaster.h b/inst/include/svforecaster.h index e5d73901..2f0b0b97 100644 --- a/inst/include/svforecaster.h +++ b/inst/include/svforecaster.h @@ -24,7 +24,7 @@ class SvForecaster { last_pvec(Eigen::VectorXd::Zero(dim_design)), sv_update(Eigen::VectorXd::Zero(dim)), post_mean(Eigen::VectorXd::Zero(dim)), - predictive_distn(Eigen::MatrixXd::Zero(step, num_sim * dim)), + // predictive_distn(Eigen::MatrixXd::Zero(step, num_sim * dim)), coef_mat(Eigen::MatrixXd::Zero(num_coef / dim, dim)), contem_mat(Eigen::MatrixXd::Identity(dim, dim)), h_last_record(sv_record.lvol_record.rightCols(dim)), @@ -52,6 +52,8 @@ class SvForecaster { } Eigen::MatrixXd forecastDensity(bool sv) { std::lock_guard lock(mtx); + Eigen::VectorXd mean_draw = post_mean.replicate(num_sim, 1); // cbind(sims) + Eigen::MatrixXd predictive_distn(step, num_sim * dim); // rbind(step), cbind(sims) for (int h = 0; h < step; h++) { last_pvec.segment(dim, (var_lag - 1) * dim) = tmp_vec; for (int i = 0; i < num_sim; i++) { @@ -59,20 +61,25 @@ class SvForecaster { if (include_mean) { coef_mat.bottomRows(1) = sv_record.coef_record.row(i).tail(dim); } - last_pvec.head(dim) = 
post_mean; + // last_pvec.head(dim) = post_mean; + last_pvec.head(dim) = mean_draw.segment(i * dim, dim); computeMean(i); // can have overflow issue due to stability of each coef_record if (sv) { updateVariance(i); post_mean += contem_mat.triangularView().solve(standard_normal); // N(post_mean, L^-1 D L) } - predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); + // predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); + mean_draw.segment(i * dim, dim) = post_mean; } + predictive_distn.row(h) = mean_draw; tmp_vec = last_pvec.head((var_lag - 1) * dim); } return predictive_distn; } Eigen::MatrixXd forecastDensity(const Eigen::VectorXd& valid_vec, bool sv) { std::lock_guard lock(mtx); + Eigen::VectorXd mean_draw = post_mean.replicate(num_sim, 1); // cbind(sims) + Eigen::MatrixXd predictive_distn(step, num_sim * dim); // rbind(step), cbind(sims) for (int h = 0; h < step; h++) { last_pvec.segment(dim, (var_lag - 1) * dim) = tmp_vec; for (int i = 0; i < num_sim; i++) { @@ -80,15 +87,18 @@ class SvForecaster { if (include_mean) { coef_mat.bottomRows(1) = sv_record.coef_record.row(i).tail(dim); } - last_pvec.head(dim) = post_mean; + // last_pvec.head(dim) = post_mean; + last_pvec.head(dim) = mean_draw.segment(i * dim, dim); computeMean(i); // can have overflow issue due to stability of each coef_record if (sv) { updateVariance(i); post_mean += contem_mat.triangularView().solve(standard_normal); // N(post_mean, L^-1 D L) } - predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); + // predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); + mean_draw.segment(i * dim, dim) = post_mean; lpl[h] += sv_update.sum() / 2 - dim * log(2 * M_PI) / 2 - ((-sv_update / 2).array().exp() * (contem_mat * (post_mean - valid_vec)).array()).matrix().squaredNorm() / 2; } + predictive_distn.row(h) = mean_draw; lpl[h] /= num_sim; tmp_vec = last_pvec.head((var_lag - 1) * dim); } @@ -117,7 +127,7 @@ class SvForecaster { Eigen::VectorXd last_pvec; // [ y_(T + h - 1)^T, y_(T + h - 2)^T, ..., y_(T + h - p)^T, 1 ] (1 when constant term) Eigen::VectorXd sv_update; // h_(T + h) Eigen::VectorXd post_mean; // posterior mean and y_(T + h - 1) - Eigen::MatrixXd predictive_distn; // rbind(step), cbind(sims) + // Eigen::MatrixXd predictive_distn; // rbind(step), cbind(sims) Eigen::MatrixXd coef_mat; // include constant term when include_mean = true Eigen::MatrixXd contem_mat; // L Eigen::MatrixXd h_last_record; // h_T record diff --git a/man/forecast_expand.Rd b/man/forecast_expand.Rd index 0dc8e376..d9b5981a 100644 --- a/man/forecast_expand.Rd +++ b/man/forecast_expand.Rd @@ -19,6 +19,7 @@ forecast_expand(object, n_ahead, y_test, num_thread = 1, ...) n_ahead, y_test, num_thread = 1, + level = 0.05, sparse = FALSE, lpl = FALSE, use_fit = TRUE, @@ -30,6 +31,7 @@ forecast_expand(object, n_ahead, y_test, num_thread = 1, ...) n_ahead, y_test, num_thread = 1, + level = 0.05, use_sv = TRUE, sparse = FALSE, lpl = FALSE, @@ -50,6 +52,8 @@ forecast_expand(object, n_ahead, y_test, num_thread = 1, ...) \item{use_fit}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}} Use \code{object} result for the first window. By default, \code{TRUE}.} +\item{level}{Specify alpha of confidence interval level 100(1 - alpha) percentage. 
By default, .05.} + \item{sparse}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}} Apply restriction. By default, \code{FALSE}.} \item{lpl}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}} Compute log-predictive likelihood (LPL). By default, \code{FALSE}.} diff --git a/man/forecast_roll.Rd b/man/forecast_roll.Rd index bfa19c75..2e08584c 100644 --- a/man/forecast_roll.Rd +++ b/man/forecast_roll.Rd @@ -29,6 +29,7 @@ is.bvharcv(x) n_ahead, y_test, num_thread = 1, + level = 0.05, sparse = FALSE, lpl = FALSE, use_fit = TRUE, @@ -40,6 +41,7 @@ is.bvharcv(x) n_ahead, y_test, num_thread = 1, + level = 0.05, use_sv = TRUE, sparse = FALSE, lpl = FALSE, @@ -64,6 +66,8 @@ is.bvharcv(x) \item{use_fit}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}} Use \code{object} result for the first window. By default, \code{TRUE}.} +\item{level}{Specify alpha of confidence interval level 100(1 - alpha) percentage. By default, .05.} + \item{sparse}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}} Apply restriction. By default, \code{FALSE}.} \item{lpl}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}} Compute log-predictive likelihood (LPL). By default, \code{FALSE}.} diff --git a/revdep/README.md b/revdep/README.md index 18091002..5d836f30 100644 --- a/revdep/README.md +++ b/revdep/README.md @@ -1,23 +1,23 @@ # Platform -|field |value | -|:--------|:-------------------------------------| -|version |R version 4.3.3 (2024-02-29) | -|os |macOS Big Sur ... 10.16 | -|system |x86_64, darwin20 | -|ui |X11 | -|language |(EN) | -|collate |en_US.UTF-8 | -|ctype |en_US.UTF-8 | -|tz |Asia/Seoul | -|date |2024-10-05 | -|pandoc |3.4 @ /usr/local/bin/ (via rmarkdown) | +|field |value | +|:--------|:----------------------------| +|version |R version 4.3.3 (2024-02-29) | +|os |macOS Big Sur ... 
10.16 | +|system |x86_64, darwin20 | +|ui |X11 | +|language |(EN) | +|collate |en_US.UTF-8 | +|ctype |en_US.UTF-8 | +|tz |Asia/Seoul | +|date |2024-10-11 | +|pandoc |3.5 @ /usr/local/bin/pandoc | # Dependencies |package |old |new |Δ | |:--------------|:-----|:---------|:--| -|bvhar |2.1.0 |2.1.1 |* | +|bvhar |2.1.1 |2.1.2 |* | |abind |NA |1.4-8 |* | |backports |NA |1.5.0 |* | |checkmate |NA |2.3.2 |* | diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp index 252182fb..f2f9f9a7 100644 --- a/src/RcppExports.cpp +++ b/src/RcppExports.cpp @@ -843,8 +843,8 @@ BEGIN_RCPP END_RCPP } // roll_bvarldlt -Rcpp::List roll_bvarldlt(Eigen::MatrixXd y, int lag, int num_chains, int num_iter, int num_burn, int thinning, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size); -RcppExport SEXP _bvhar_roll_bvarldlt(SEXP ySEXP, SEXP lagSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_regSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP, SEXP chunk_sizeSEXP) { +Rcpp::List roll_bvarldlt(Eigen::MatrixXd y, int lag, int num_chains, int num_iter, int num_burn, int thinning, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads); +RcppExport SEXP _bvhar_roll_bvarldlt(SEXP ySEXP, SEXP lagSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_regSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -873,14 +873,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< Eigen::MatrixXi >::type seed_chain(seed_chainSEXP); Rcpp::traits::input_parameter< Eigen::VectorXi >::type seed_forecast(seed_forecastSEXP); Rcpp::traits::input_parameter< int >::type nthreads(nthreadsSEXP); - Rcpp::traits::input_parameter< int >::type chunk_size(chunk_sizeSEXP); - rcpp_result_gen = Rcpp::wrap(roll_bvarldlt(y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size)); + rcpp_result_gen = Rcpp::wrap(roll_bvarldlt(y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, 
fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads)); return rcpp_result_gen; END_RCPP } // roll_bvharldlt -Rcpp::List roll_bvharldlt(Eigen::MatrixXd y, int week, int month, int num_chains, int num_iter, int num_burn, int thinning, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size); -RcppExport SEXP _bvhar_roll_bvharldlt(SEXP ySEXP, SEXP weekSEXP, SEXP monthSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_regSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP, SEXP chunk_sizeSEXP) { +Rcpp::List roll_bvharldlt(Eigen::MatrixXd y, int week, int month, int num_chains, int num_iter, int num_burn, int thinning, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads); +RcppExport SEXP _bvhar_roll_bvharldlt(SEXP ySEXP, SEXP weekSEXP, SEXP monthSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_regSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -910,14 +909,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< Eigen::MatrixXi >::type seed_chain(seed_chainSEXP); Rcpp::traits::input_parameter< Eigen::VectorXi >::type seed_forecast(seed_forecastSEXP); Rcpp::traits::input_parameter< int >::type nthreads(nthreadsSEXP); - Rcpp::traits::input_parameter< int >::type chunk_size(chunk_sizeSEXP); - rcpp_result_gen = Rcpp::wrap(roll_bvharldlt(y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size)); + rcpp_result_gen = Rcpp::wrap(roll_bvharldlt(y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads)); return rcpp_result_gen; END_RCPP } // expand_bvarldlt -Rcpp::List expand_bvarldlt(Eigen::MatrixXd y, int 
lag, int num_chains, int num_iter, int num_burn, int thinning, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size); -RcppExport SEXP _bvhar_expand_bvarldlt(SEXP ySEXP, SEXP lagSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_regSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP, SEXP chunk_sizeSEXP) { +Rcpp::List expand_bvarldlt(Eigen::MatrixXd y, int lag, int num_chains, int num_iter, int num_burn, int thinning, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads); +RcppExport SEXP _bvhar_expand_bvarldlt(SEXP ySEXP, SEXP lagSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_regSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -946,14 +944,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< Eigen::MatrixXi >::type seed_chain(seed_chainSEXP); Rcpp::traits::input_parameter< Eigen::VectorXi >::type seed_forecast(seed_forecastSEXP); Rcpp::traits::input_parameter< int >::type nthreads(nthreadsSEXP); - Rcpp::traits::input_parameter< int >::type chunk_size(chunk_sizeSEXP); - rcpp_result_gen = Rcpp::wrap(expand_bvarldlt(y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size)); + rcpp_result_gen = Rcpp::wrap(expand_bvarldlt(y, lag, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads)); return rcpp_result_gen; END_RCPP } // expand_bvharldlt -Rcpp::List expand_bvharldlt(Eigen::MatrixXd y, int week, int month, int num_chains, int num_iter, int num_burn, int thinning, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, 
int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size); -RcppExport SEXP _bvhar_expand_bvharldlt(SEXP ySEXP, SEXP weekSEXP, SEXP monthSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_regSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP, SEXP chunk_sizeSEXP) { +Rcpp::List expand_bvharldlt(Eigen::MatrixXd y, int week, int month, int num_chains, int num_iter, int num_burn, int thinning, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads); +RcppExport SEXP _bvhar_expand_bvharldlt(SEXP ySEXP, SEXP weekSEXP, SEXP monthSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_regSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -983,8 +980,7 @@ BEGIN_RCPP Rcpp::traits::input_parameter< Eigen::MatrixXi >::type seed_chain(seed_chainSEXP); Rcpp::traits::input_parameter< Eigen::VectorXi >::type seed_forecast(seed_forecastSEXP); Rcpp::traits::input_parameter< int >::type nthreads(nthreadsSEXP); - Rcpp::traits::input_parameter< int >::type chunk_size(chunk_sizeSEXP); - rcpp_result_gen = Rcpp::wrap(expand_bvharldlt(y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size)); + rcpp_result_gen = Rcpp::wrap(expand_bvharldlt(y, week, month, num_chains, num_iter, num_burn, thinning, sparse, level, fit_record, param_reg, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads)); return rcpp_result_gen; END_RCPP } @@ -1258,8 +1254,8 @@ BEGIN_RCPP END_RCPP } // roll_bvarsv -Rcpp::List roll_bvarsv(Eigen::MatrixXd y, int lag, int num_chains, int num_iter, int num_burn, int thinning, bool sv, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size); -RcppExport SEXP _bvhar_roll_bvarsv(SEXP ySEXP, SEXP lagSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP 
num_burnSEXP, SEXP thinningSEXP, SEXP svSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_svSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP, SEXP chunk_sizeSEXP) { +Rcpp::List roll_bvarsv(Eigen::MatrixXd y, int lag, int num_chains, int num_iter, int num_burn, int thinning, bool sv, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads); +RcppExport SEXP _bvhar_roll_bvarsv(SEXP ySEXP, SEXP lagSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP svSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_svSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -1289,14 +1285,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< Eigen::MatrixXi >::type seed_chain(seed_chainSEXP); Rcpp::traits::input_parameter< Eigen::VectorXi >::type seed_forecast(seed_forecastSEXP); Rcpp::traits::input_parameter< int >::type nthreads(nthreadsSEXP); - Rcpp::traits::input_parameter< int >::type chunk_size(chunk_sizeSEXP); - rcpp_result_gen = Rcpp::wrap(roll_bvarsv(y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size)); + rcpp_result_gen = Rcpp::wrap(roll_bvarsv(y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads)); return rcpp_result_gen; END_RCPP } // roll_bvharsv -Rcpp::List roll_bvharsv(Eigen::MatrixXd y, int week, int month, int num_chains, int num_iter, int num_burn, int thinning, bool sv, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size); -RcppExport SEXP _bvhar_roll_bvharsv(SEXP ySEXP, SEXP weekSEXP, SEXP monthSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP svSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_svSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP 
stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP, SEXP chunk_sizeSEXP) { +Rcpp::List roll_bvharsv(Eigen::MatrixXd y, int week, int month, int num_chains, int num_iter, int num_burn, int thinning, bool sv, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads); +RcppExport SEXP _bvhar_roll_bvharsv(SEXP ySEXP, SEXP weekSEXP, SEXP monthSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP svSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_svSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -1327,14 +1322,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< Eigen::MatrixXi >::type seed_chain(seed_chainSEXP); Rcpp::traits::input_parameter< Eigen::VectorXi >::type seed_forecast(seed_forecastSEXP); Rcpp::traits::input_parameter< int >::type nthreads(nthreadsSEXP); - Rcpp::traits::input_parameter< int >::type chunk_size(chunk_sizeSEXP); - rcpp_result_gen = Rcpp::wrap(roll_bvharsv(y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size)); + rcpp_result_gen = Rcpp::wrap(roll_bvharsv(y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads)); return rcpp_result_gen; END_RCPP } // expand_bvarsv -Rcpp::List expand_bvarsv(Eigen::MatrixXd y, int lag, int num_chains, int num_iter, int num_burn, int thinning, bool sv, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size); -RcppExport SEXP _bvhar_expand_bvarsv(SEXP ySEXP, SEXP lagSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP svSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_svSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP, SEXP chunk_sizeSEXP) { +Rcpp::List expand_bvarsv(Eigen::MatrixXd y, int lag, int num_chains, int num_iter, int num_burn, int thinning, bool sv, bool sparse, double level, 
Rcpp::List fit_record, Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads); +RcppExport SEXP _bvhar_expand_bvarsv(SEXP ySEXP, SEXP lagSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP svSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_svSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -1364,14 +1358,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< Eigen::MatrixXi >::type seed_chain(seed_chainSEXP); Rcpp::traits::input_parameter< Eigen::VectorXi >::type seed_forecast(seed_forecastSEXP); Rcpp::traits::input_parameter< int >::type nthreads(nthreadsSEXP); - Rcpp::traits::input_parameter< int >::type chunk_size(chunk_sizeSEXP); - rcpp_result_gen = Rcpp::wrap(expand_bvarsv(y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size)); + rcpp_result_gen = Rcpp::wrap(expand_bvarsv(y, lag, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads)); return rcpp_result_gen; END_RCPP } // expand_bvharsv -Rcpp::List expand_bvharsv(Eigen::MatrixXd y, int week, int month, int num_chains, int num_iter, int num_burn, int thinning, bool sv, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size); -RcppExport SEXP _bvhar_expand_bvharsv(SEXP ySEXP, SEXP weekSEXP, SEXP monthSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP svSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_svSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP, SEXP chunk_sizeSEXP) { +Rcpp::List expand_bvharsv(Eigen::MatrixXd y, int week, int month, int num_chains, int num_iter, int num_burn, int thinning, bool sv, bool sparse, double level, Rcpp::List fit_record, Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd 
y_test, bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads); +RcppExport SEXP _bvhar_expand_bvharsv(SEXP ySEXP, SEXP weekSEXP, SEXP monthSEXP, SEXP num_chainsSEXP, SEXP num_iterSEXP, SEXP num_burnSEXP, SEXP thinningSEXP, SEXP svSEXP, SEXP sparseSEXP, SEXP levelSEXP, SEXP fit_recordSEXP, SEXP param_svSEXP, SEXP param_priorSEXP, SEXP param_interceptSEXP, SEXP param_initSEXP, SEXP prior_typeSEXP, SEXP grp_idSEXP, SEXP own_idSEXP, SEXP cross_idSEXP, SEXP grp_matSEXP, SEXP include_meanSEXP, SEXP stepSEXP, SEXP y_testSEXP, SEXP get_lplSEXP, SEXP seed_chainSEXP, SEXP seed_forecastSEXP, SEXP nthreadsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -1402,8 +1395,7 @@ BEGIN_RCPP Rcpp::traits::input_parameter< Eigen::MatrixXi >::type seed_chain(seed_chainSEXP); Rcpp::traits::input_parameter< Eigen::VectorXi >::type seed_forecast(seed_forecastSEXP); Rcpp::traits::input_parameter< int >::type nthreads(nthreadsSEXP); - Rcpp::traits::input_parameter< int >::type chunk_size(chunk_sizeSEXP); - rcpp_result_gen = Rcpp::wrap(expand_bvharsv(y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads, chunk_size)); + rcpp_result_gen = Rcpp::wrap(expand_bvharsv(y, week, month, num_chains, num_iter, num_burn, thinning, sv, sparse, level, fit_record, param_sv, param_prior, param_intercept, param_init, prior_type, grp_id, own_id, cross_id, grp_mat, include_mean, step, y_test, get_lpl, seed_chain, seed_forecast, nthreads)); return rcpp_result_gen; END_RCPP } @@ -1886,10 +1878,10 @@ static const R_CallMethodDef CallEntries[] = { {"_bvhar_forecast_bvharhs_deprecate", (DL_FUNC) &_bvhar_forecast_bvharhs_deprecate, 7}, {"_bvhar_forecast_bvarldlt", (DL_FUNC) &_bvhar_forecast_bvarldlt, 11}, {"_bvhar_forecast_bvharldlt", (DL_FUNC) &_bvhar_forecast_bvharldlt, 12}, - {"_bvhar_roll_bvarldlt", (DL_FUNC) &_bvhar_roll_bvarldlt, 26}, - {"_bvhar_roll_bvharldlt", (DL_FUNC) &_bvhar_roll_bvharldlt, 27}, - {"_bvhar_expand_bvarldlt", (DL_FUNC) &_bvhar_expand_bvarldlt, 26}, - {"_bvhar_expand_bvharldlt", (DL_FUNC) &_bvhar_expand_bvharldlt, 27}, + {"_bvhar_roll_bvarldlt", (DL_FUNC) &_bvhar_roll_bvarldlt, 25}, + {"_bvhar_roll_bvharldlt", (DL_FUNC) &_bvhar_roll_bvharldlt, 26}, + {"_bvhar_expand_bvarldlt", (DL_FUNC) &_bvhar_expand_bvarldlt, 25}, + {"_bvhar_expand_bvharldlt", (DL_FUNC) &_bvhar_expand_bvharldlt, 26}, {"_bvhar_forecast_bvar", (DL_FUNC) &_bvhar_forecast_bvar, 3}, {"_bvhar_forecast_bvharmn", (DL_FUNC) &_bvhar_forecast_bvharmn, 3}, {"_bvhar_roll_bvar", (DL_FUNC) &_bvhar_roll_bvar, 7}, @@ -1906,10 +1898,10 @@ static const R_CallMethodDef CallEntries[] = { {"_bvhar_expand_vhar", (DL_FUNC) &_bvhar_expand_vhar, 8}, {"_bvhar_forecast_bvarsv", (DL_FUNC) &_bvhar_forecast_bvarsv, 12}, {"_bvhar_forecast_bvharsv", (DL_FUNC) &_bvhar_forecast_bvharsv, 13}, - {"_bvhar_roll_bvarsv", (DL_FUNC) &_bvhar_roll_bvarsv, 27}, - {"_bvhar_roll_bvharsv", (DL_FUNC) &_bvhar_roll_bvharsv, 28}, - {"_bvhar_expand_bvarsv", (DL_FUNC) &_bvhar_expand_bvarsv, 27}, - {"_bvhar_expand_bvharsv", (DL_FUNC) &_bvhar_expand_bvharsv, 28}, + {"_bvhar_roll_bvarsv", (DL_FUNC) &_bvhar_roll_bvarsv, 26}, + {"_bvhar_roll_bvharsv", (DL_FUNC) &_bvhar_roll_bvharsv, 27}, + {"_bvhar_expand_bvarsv", (DL_FUNC) &_bvhar_expand_bvarsv, 26}, + {"_bvhar_expand_bvharsv", (DL_FUNC) &_bvhar_expand_bvharsv, 27}, 
{"_bvhar_compute_stablemat", (DL_FUNC) &_bvhar_compute_stablemat, 1}, {"_bvhar_compute_var_stablemat", (DL_FUNC) &_bvhar_compute_var_stablemat, 1}, {"_bvhar_compute_vhar_stablemat", (DL_FUNC) &_bvhar_compute_vhar_stablemat, 1}, diff --git a/src/forecast-ldlt.cpp b/src/forecast-ldlt.cpp index 5fcd1f33..c84206f7 100644 --- a/src/forecast-ldlt.cpp +++ b/src/forecast-ldlt.cpp @@ -198,7 +198,6 @@ Rcpp::List forecast_bvharldlt(int num_chains, int month, int step, Eigen::Matrix //' @param step Integer, Step to forecast //' @param y_test Evaluation time series data period after `y` //' @param nthreads Number of threads -//' @param chunk_size Chunk size for OpenMP static scheduling //' //' @noRd // [[Rcpp::export]] @@ -207,7 +206,7 @@ Rcpp::List roll_bvarldlt(Eigen::MatrixXd y, int lag, int num_chains, int num_ite Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, - bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size) { + bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads) { int num_window = y.rows(); int dim = y.cols(); int num_test = y_test.rows(); @@ -447,7 +446,7 @@ Rcpp::List roll_bvarldlt(Eigen::MatrixXd y, int lag, int num_chains, int num_ite } } else { #ifdef _OPENMP - #pragma omp parallel for collapse(2) schedule(static, chunk_size) num_threads(nthreads) + #pragma omp parallel for collapse(2) schedule(static, num_chains) num_threads(nthreads) #endif for (int window = 0; window < num_horizon; window++) { for (int chain = 0; chain < num_chains; chain++) { @@ -493,7 +492,6 @@ Rcpp::List roll_bvarldlt(Eigen::MatrixXd y, int lag, int num_chains, int num_ite //' @param step Integer, Step to forecast //' @param y_test Evaluation time series data period after `y` //' @param nthreads Number of threads -//' @param chunk_size Chunk size for OpenMP static scheduling //' //' @noRd // [[Rcpp::export]] @@ -502,7 +500,7 @@ Rcpp::List roll_bvharldlt(Eigen::MatrixXd y, int week, int month, int num_chains Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, - bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size) { + bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads) { int num_window = y.rows(); int dim = y.cols(); int num_test = y_test.rows(); @@ -743,7 +741,7 @@ Rcpp::List roll_bvharldlt(Eigen::MatrixXd y, int week, int month, int num_chains } } else { #ifdef _OPENMP - #pragma omp parallel for collapse(2) schedule(static, chunk_size) num_threads(nthreads) + #pragma omp parallel for collapse(2) schedule(static, num_chains) num_threads(nthreads) #endif for (int window = 0; window < num_horizon; window++) { for (int chain = 0; chain < num_chains; chain++) { @@ -789,7 +787,6 @@ Rcpp::List roll_bvharldlt(Eigen::MatrixXd y, int week, int month, int num_chains //' @param step Integer, Step to forecast //' @param y_test Evaluation time series data period after `y` //' @param nthreads Number of threads -//' @param chunk_size Chunk size for OpenMP static scheduling //' //' @noRd // [[Rcpp::export]] @@ -798,7 +795,7 @@ Rcpp::List 
expand_bvarldlt(Eigen::MatrixXd y, int lag, int num_chains, int num_i Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, - bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size) { + bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads) { int num_window = y.rows(); int dim = y.cols(); int num_test = y_test.rows(); @@ -1038,7 +1035,7 @@ Rcpp::List expand_bvarldlt(Eigen::MatrixXd y, int lag, int num_chains, int num_i } } else { #ifdef _OPENMP - #pragma omp parallel for collapse(2) schedule(dynamic, chunk_size) num_threads(nthreads) + #pragma omp parallel for collapse(2) schedule(dynamic, num_chains) num_threads(nthreads) #endif for (int window = 0; window < num_horizon; window++) { for (int chain = 0; chain < num_chains; chain++) { @@ -1084,7 +1081,6 @@ Rcpp::List expand_bvarldlt(Eigen::MatrixXd y, int lag, int num_chains, int num_i //' @param step Integer, Step to forecast //' @param y_test Evaluation time series data period after `y` //' @param nthreads Number of threads -//' @param chunk_size Chunk size for OpenMP static scheduling //' //' @noRd // [[Rcpp::export]] @@ -1093,7 +1089,7 @@ Rcpp::List expand_bvharldlt(Eigen::MatrixXd y, int week, int month, int num_chai Rcpp::List param_reg, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, - bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size) { + bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads) { #ifdef _OPENMP Eigen::setNbThreads(nthreads); #endif @@ -1337,7 +1333,7 @@ Rcpp::List expand_bvharldlt(Eigen::MatrixXd y, int week, int month, int num_chai } } else { #ifdef _OPENMP - #pragma omp parallel for collapse(2) schedule(dynamic, chunk_size) num_threads(nthreads) + #pragma omp parallel for collapse(2) schedule(dynamic, num_chains) num_threads(nthreads) #endif for (int window = 0; window < num_horizon; window++) { for (int chain = 0; chain < num_chains; chain++) { diff --git a/src/forecast-sv.cpp b/src/forecast-sv.cpp index 3ab3e834..0a2813e1 100644 --- a/src/forecast-sv.cpp +++ b/src/forecast-sv.cpp @@ -210,7 +210,6 @@ Rcpp::List forecast_bvharsv(int num_chains, int month, int step, Eigen::MatrixXd //' @param step Integer, Step to forecast //' @param y_test Evaluation time series data period after `y` //' @param nthreads Number of threads -//' @param chunk_size Chunk size for OpenMP static scheduling //' //' @noRd // [[Rcpp::export]] @@ -219,7 +218,7 @@ Rcpp::List roll_bvarsv(Eigen::MatrixXd y, int lag, int num_chains, int num_iter, Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, - bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size) { + bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads) { int num_window = y.rows(); int dim = y.cols(); int num_test = y_test.rows(); @@ -467,7 
+466,7 @@ Rcpp::List roll_bvarsv(Eigen::MatrixXd y, int lag, int num_chains, int num_iter, } } else { #ifdef _OPENMP - #pragma omp parallel for collapse(2) schedule(static, chunk_size) num_threads(nthreads) + #pragma omp parallel for collapse(2) schedule(static, num_chains) num_threads(nthreads) #endif for (int window = 0; window < num_horizon; window++) { for (int chain = 0; chain < num_chains; chain++) { @@ -513,7 +512,6 @@ Rcpp::List roll_bvarsv(Eigen::MatrixXd y, int lag, int num_chains, int num_iter, //' @param step Integer, Step to forecast //' @param y_test Evaluation time series data period after `y` //' @param nthreads Number of threads -//' @param chunk_size Chunk size for OpenMP static scheduling //' //' @noRd // [[Rcpp::export]] @@ -522,7 +520,7 @@ Rcpp::List roll_bvharsv(Eigen::MatrixXd y, int week, int month, int num_chains, Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, - bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size) { + bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads) { int num_window = y.rows(); int dim = y.cols(); int num_test = y_test.rows(); @@ -771,7 +769,7 @@ Rcpp::List roll_bvharsv(Eigen::MatrixXd y, int week, int month, int num_chains, } } else { #ifdef _OPENMP - #pragma omp parallel for collapse(2) schedule(static, chunk_size) num_threads(nthreads) + #pragma omp parallel for collapse(2) schedule(static, num_chains) num_threads(nthreads) #endif for (int window = 0; window < num_horizon; window++) { for (int chain = 0; chain < num_chains; chain++) { @@ -817,7 +815,6 @@ Rcpp::List roll_bvharsv(Eigen::MatrixXd y, int week, int month, int num_chains, //' @param step Integer, Step to forecast //' @param y_test Evaluation time series data period after `y` //' @param nthreads Number of threads -//' @param chunk_size Chunk size for OpenMP static scheduling //' //' @noRd // [[Rcpp::export]] @@ -826,7 +823,7 @@ Rcpp::List expand_bvarsv(Eigen::MatrixXd y, int lag, int num_chains, int num_ite Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, - bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size) { + bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads) { int num_window = y.rows(); int dim = y.cols(); int num_test = y_test.rows(); @@ -1074,7 +1071,7 @@ Rcpp::List expand_bvarsv(Eigen::MatrixXd y, int lag, int num_chains, int num_ite } } else { #ifdef _OPENMP - #pragma omp parallel for collapse(2) schedule(dynamic, chunk_size) num_threads(nthreads) + #pragma omp parallel for collapse(2) schedule(dynamic, num_chains) num_threads(nthreads) #endif for (int window = 0; window < num_horizon; window++) { for (int chain = 0; chain < num_chains; chain++) { @@ -1120,7 +1117,6 @@ Rcpp::List expand_bvarsv(Eigen::MatrixXd y, int lag, int num_chains, int num_ite //' @param step Integer, Step to forecast //' @param y_test Evaluation time series data period after `y` //' @param nthreads Number of threads -//' @param chunk_size Chunk size for OpenMP static scheduling //' //' @noRd // 
[[Rcpp::export]] @@ -1129,7 +1125,7 @@ Rcpp::List expand_bvharsv(Eigen::MatrixXd y, int week, int month, int num_chains Rcpp::List param_sv, Rcpp::List param_prior, Rcpp::List param_intercept, Rcpp::List param_init, int prior_type, Eigen::VectorXi grp_id, Eigen::VectorXi own_id, Eigen::VectorXi cross_id, Eigen::MatrixXi grp_mat, bool include_mean, int step, Eigen::MatrixXd y_test, - bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads, int chunk_size) { + bool get_lpl, Eigen::MatrixXi seed_chain, Eigen::VectorXi seed_forecast, int nthreads) { #ifdef _OPENMP Eigen::setNbThreads(nthreads); #endif @@ -1381,7 +1377,7 @@ Rcpp::List expand_bvharsv(Eigen::MatrixXd y, int week, int month, int num_chains } } else { #ifdef _OPENMP - #pragma omp parallel for collapse(2) schedule(dynamic, chunk_size) num_threads(nthreads) + #pragma omp parallel for collapse(2) schedule(dynamic, num_chains) num_threads(nthreads) #endif for (int window = 0; window < num_horizon; window++) { for (int chain = 0; chain < num_chains; chain++) { diff --git a/vignettes/linking.Rmd b/vignettes/linking.Rmd index f1f799d0..524fc246 100644 --- a/vignettes/linking.Rmd +++ b/vignettes/linking.Rmd @@ -61,8 +61,18 @@ Also, you can use in your single `C++` source: ``` // [[Rcpp::depends(BH, RcppEigen, bvhar)]] +// [[Rcpp::plugins(bvhar)]] ``` +You need to add the `plugins` attribute because the headers in this package require the `USE_RCPP` macro to be defined. +Alternatively, you can define the macro yourself: + +```{r, eval=FALSE} +Sys.setenv("PKG_CPPFLAGS" = "-DUSE_RCPP") +``` + +without using the `plugins` attribute. + ## MCMC headers `mcmc*.h` has classes that can conduct MCMC.
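To make the linking setup described in the vignette hunk above concrete, here is a minimal sketch of a standalone C++ source compiled with `Rcpp::sourceCpp()`. Only the `depends`/`plugins` attributes come from the vignette; the file name, the exported function, and its body are hypothetical placeholders, and the actual bvhar header to include depends on which classes (e.g. the `mcmc*.h` headers) you need.

```
// demo.cpp (hypothetical file name): the attributes below pull in the BH,
// RcppEigen, and bvhar headers, and the bvhar plugin defines USE_RCPP.
// [[Rcpp::depends(BH, RcppEigen, bvhar)]]
// [[Rcpp::plugins(bvhar)]]
#include <RcppEigen.h>
// #include the bvhar header you need here (e.g. one of the mcmc*.h headers).

// Placeholder export so the file compiles and is callable from R;
// replace the body with code that actually uses the bvhar classes.
// [[Rcpp::export]]
Eigen::MatrixXd demo_identity(int dim) {
  return Eigen::MatrixXd::Identity(dim, dim);
}
```

From R, `Rcpp::sourceCpp("demo.cpp")` should compile the file and expose `demo_identity()`; with the `Sys.setenv("PKG_CPPFLAGS" = "-DUSE_RCPP")` approach shown above, the `plugins` line can be dropped.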