diff --git a/.Rbuildignore b/.Rbuildignore index f86b949..4df53a4 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -45,3 +45,4 @@ hub_cov\.R covr.html covr.rds website +^vignettes/articles$ diff --git a/.github/workflows/render-website.yml b/.github/workflows/render-website.yml index f26691a..82e9800 100644 --- a/.github/workflows/render-website.yml +++ b/.github/workflows/render-website.yml @@ -7,6 +7,8 @@ on: paths: - '.github/workflows/render-website.yml' - 'DESCRIPTION' + - 'inst/NEWS' + - 'inst/NEWS.Rd' - 'R/**' - 'vignettes/**' - 'website/**' # Make sure to include the path to your website files @@ -87,7 +89,10 @@ jobs: - name: Copy Vignettes and Other Files run: | - cp -r vignettes website/ + mkdir -p website/vignettes + cp vignettes/getting-started.Rmd website/vignettes/. + cp vignettes/articles/package_tutorial/tutorial.pdf website/vignettes/. + # cp -r vignettes website/ Rscript --vanilla website/rmd2qmd.R website/vignettes/getting-started rm website/vignettes/getting-started.Rmd tree website diff --git a/.gitignore b/.gitignore index ed287eb..d54f204 100644 --- a/.gitignore +++ b/.gitignore @@ -35,3 +35,10 @@ website/html website/help inst/doc website/vignettes/* +vignettes/articles/package_tutorial/tutorial.aux +vignettes/articles/package_tutorial/tutorial.bbl +vignettes/articles/package_tutorial/tutorial-concordance.tex +vignettes/articles/package_tutorial/tutorial.log +vignettes/articles/package_tutorial/tutorial.tex +vignettes/articles/package_tutorial/tutorial.synctex.gz +!vignettes/articles/package_tutorial/tutorial.pdf diff --git a/DESCRIPTION b/DESCRIPTION index fe63147..3b3e1d0 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: panelPomp Type: Package Title: Inference for Panel Partially Observed Markov Processes -Version: 1.2.0 +Version: 1.3.0 Date: 2023-05-22 Authors@R: c(person(given="Carles",family="Breto",role=c("aut","cre"),email="carles.breto@uv.es",comment=c(ORCID="0000-0003-4695-4902")), 
person(given=c("Edward","L."),family="Ionides",role="aut",comment=c(ORCID="0000-0002-4190-0174")), @@ -15,7 +15,7 @@ Depends: Imports: lifecycle, methods -RoxygenNote: 7.3.1 +RoxygenNote: 7.3.2 Encoding: UTF-8 Collate: 'panelPomp-package.R' @@ -42,5 +42,5 @@ Roxygen: list(markdown = TRUE) Suggests: knitr, rmarkdown, - bookdown + bookdown VignetteBuilder: knitr diff --git a/NAMESPACE b/NAMESPACE index ace2ee5..06c6551 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -3,21 +3,19 @@ export("shared<-") export("specific<-") export(contacts) -export(fromVectorPparams) export(get_col) export(get_row) -export(pParams) export(panelGompertz) export(panelGompertzLikelihood) export(panelPomp) export(panelRandomWalk) export(panel_logmeanexp) -export(pparams) export(runif_panel_design) export(shared) export(specific) export(toMatrixPparams) -export(toVectorPparams) +export(toParamList) +export(toParamVec) export(unitLogLik) export(unit_objects) export(unitlogLik) @@ -37,7 +35,6 @@ exportMethods(mif2) exportMethods(names) exportMethods(pfilter) exportMethods(plot) -exportMethods(pparams) exportMethods(print) exportMethods(shared) exportMethods(show) diff --git a/R/generics.R b/R/generics.R index 9afb23a..b324b0f 100644 --- a/R/generics.R +++ b/R/generics.R @@ -1,28 +1,5 @@ ## generic functions -#' @title Extract parameters (coefficients) of a panel model -#' @description \code{pparams()} is a generic function that extracts parameter -#' (coefficient) values from objects returned by panel modeling functions. While -#' the named \code{numeric} vector format is useful and possible via S4 methods -#' for \code{coef()}, alternative formats capturing the panel structure can be -#' implemented via \code{pparams()}. -#' @param object an object for which extraction of panel model parameter -#' (coefficient) values is meaningful. -#' @param ... additional arguments. -#' @details This is a generic function: methods can be defined for it. 
-#' @return Parameter (coefficient) values extracted from the panel model -#' \code{object}. -#' -#' \pparamsReturn -#' @example examples/prw.R -#' @example examples/pparams.R -#' @keywords internal -#' @seealso \link{panelPomp_methods} -#' @author Carles \Breto -#' @export -setGeneric(name = "pparams", - def = function(object, ...) standardGeneric("pparams")) - #' @title Extract units of a panel model #' @description \code{unit_objects()} is a generic function that extracts a list #' of objects corresponding to units of panel objects returned by panel modeling diff --git a/R/mif2.R b/R/mif2.R index 471d55f..5fde42f 100644 --- a/R/mif2.R +++ b/R/mif2.R @@ -206,7 +206,7 @@ mif2.internal <- function (object, Nmif, start, Np, rw.sd, cooling.type, # Extract loglikelihoods unit.logliks <- sapply(X = output, FUN = logLik) ploglik <- sum(unit.logliks) - # create pParams slot from last mif iteration values in pconv.rec + # create parameter list from last mif iteration values in pconv.rec pParams <- list() pParams$shared <- pconv.rec[nrow(pconv.rec), -1L] # Here, we want to drop the iteration dimension but, if there was only one @@ -262,7 +262,7 @@ setMethod( "mif2", signature=signature(data="panelPomp"), definition = function (data, Nmif = 1, shared.start, specific.start, start, - Np, rw.sd, cooling.type = c("hyperbolic", "geometric"), + Np, rw.sd, cooling.type = c("geometric", "hyperbolic"), cooling.fraction.50, block = FALSE, verbose = getOption("verbose"), ...) 
{ object <- data @@ -275,7 +275,7 @@ setMethod( if (missing(start)) { start <- list(shared=object@shared,specific=object@specific) } else { - if (is.numeric(start)) start <- pParams(start) + if (is.numeric(start)) start <- toParamList(start) } if (missing(shared.start)) shared.start <- start$shared if (missing(specific.start)) specific.start <- start$specific @@ -332,7 +332,7 @@ setMethod( if (missing(start)) { start <- list(shared=object@shared,specific=object@specific) } else { - if (is.numeric(start)) start <- pParams(start) + if (is.numeric(start)) start <- toParamList(start) } if (missing(shared.start)) shared.start <- start$shared if (missing(specific.start)) specific.start <- start$specific diff --git a/R/panelPomp.R b/R/panelPomp.R index de2ad6b..4dd853e 100644 --- a/R/panelPomp.R +++ b/R/panelPomp.R @@ -179,7 +179,7 @@ panelPomp <- function (object, shared, specific, params) { call.=FALSE) } else { if (is.numeric(params) && !is.null(names(params))) { - params <- pParams(params) + params <- toParamList(params) } else { stop(wQuotes(ep,"''params'' must be a named numeric vector"),call.=FALSE) } diff --git a/R/panelPomp_methods.R b/R/panelPomp_methods.R index 03e3e8e..e6c74c5 100644 --- a/R/panelPomp_methods.R +++ b/R/panelPomp_methods.R @@ -20,8 +20,7 @@ NULL #' \item{coef<-}{Assign coefficients to \code{panelPomp} objects.} #' \item{length}{Count the number of units in \code{panelPomp} objects.} #' \item{names}{Get the unit names of \code{panelPomp} objects.} -#' \item{pparams}{Extracts coefficients from \code{panelPomp} objects in list form.} -#' \item{pParams}{Converts panel coefficients from vector form to list form.} +#' \item{toParamList}{Converts panel coefficients from vector form to list form.} #' \item{window}{Subset \code{panelPomp} objects by changing start time and #' end time.} #' \item{\code{[]}}{Take a subset of units.} @@ -42,15 +41,21 @@ NULL setMethod( "coef", signature=signature(object="panelPomp"), - definition = function (object) { + 
definition = function (object, format = c("vector", 'list')) { + out_type <- match.arg(format) pmat <- object@specific - c( - object@shared, - setNames( - as.numeric(pmat), - outer(rownames(pmat),colnames(pmat),sprintf,fmt="%s[%s]") + + if (out_type == 'vector') { + c( + object@shared, + setNames( + as.numeric(pmat), + outer(rownames(pmat),colnames(pmat),sprintf,fmt="%s[%s]") + ) ) - ) + } else if (out_type == 'list') { + list(shared=object@shared,specific=object@specific) + } } ) @@ -114,28 +119,17 @@ setMethod( definition = function (x) names(x@unit_objects) ) -#' @rdname panelPomp_methods -#' @return \pparamsReturn -# \pparamsReturn is resused in documentation of generic function introduced by the panelPomp package -#' @example examples/pparams.R -#' @export -setMethod( - "pparams", - signature=signature(object="panelPomp"), - definition = function (object) - list(shared=object@shared,specific=object@specific) -) - #' @rdname panelPomp_methods #' @return -#' \code{pParams()} returns a \code{list} with the model parameters in list form. +#' \code{toParamList()} returns a \code{list} with the model parameters in list form. #' @examples #' # convert vector-form parameters to list-form parameters -#' pParams(coef(prw)) +#' toParamList(coef(prw)) #' @export -pParams <- function (value) { +toParamList <- function (value) { - ep <- wQuotes("in ''pParams'': ") + ep <- wQuotes("in ''toParamList'': ") + if (is.list(value)) stop(ep, 'input is already a list.', call. = FALSE) if (!is.vector(value)) stop(ep, "input must be a vector.", call. 
= FALSE) nn <- grep("^.+\\[.+?\\]$", names(value), perl = TRUE, value = TRUE) @@ -183,7 +177,7 @@ setMethod( cat("panel of",length(object),ifelse(length(object)>1,"units","unit"),"\n") if (length(coef(object))>0) { cat("parameter(s):\n") - print(pParams(coef(object))) + print(coef(object, format = 'list')) } else { cat("parameter(s) unspecified\n"); } diff --git a/R/params.R b/R/params.R index 49c2835..f641553 100644 --- a/R/params.R +++ b/R/params.R @@ -3,89 +3,37 @@ #' @include panelPomp_methods.R NULL -#' @title Convert to and from a \code{panelPomp} object \code{pParams} slot format and a one-row \code{data.frame} +#' @title Manipulating \code{panelPomp} object parameter formats #' @description These facilitate keeping a record of evaluated log likelihoods. -#' @param vec_pars A one-row \code{data.frame} with format matching that of the -#' output of \link{toVectorPparams}. -#' @param pParams A list with the format of the \code{pParams} slot of \code{panelPomp} objects. +#' @param pParams A list with both shared (vector) and unit-specific (matrix) parameters. #' @name params NULL #' @rdname params #' @author Carles \Breto #' @return -#' \code{toVectorPparams()} returns an object of class \code{data.frame}. +#' \code{toParamVec()} returns model parameters in vector form. This function +#' is a near inverse of \link{toParamList}. #' @examples #' prw <- panelRandomWalk() -#' toVectorPparams(pparams(prw)) +#' toParamVec(coef(prw, format = 'list')) #' @export -toVectorPparams <- function(pParams) { - # rbind replicated shared parameters with matrix of specific parameters - mat_pars <- rbind( - matrix( - rep(pParams$shared, - times=ncol(pParams$specific)), - ncol = ncol(pParams$specific), - dimnames = list(names(pParams$shared), NULL) - ), - pParams$specific - ) - # vectorize the matrix - vec_pars <- setNames( - as.vector(mat_pars), - nm=paste0(rep(colnames(mat_pars), each = nrow(mat_pars)), - rownames(mat_pars))) - # Append info about ... - #...
nature of parameters (shared and specific), and ... - par_typ <- setNames( - c(rep("shared",times=length(pParams$shared)), - rep("specific",times=nrow(pParams$specific))), - nm=c(names(pParams$shared),rownames(pParams$specific))) - # ... unit names - u_nms <- setNames( - rep("unit_name",ncol(mat_pars)), - nm=colnames(pParams$specific) - ) - # return - merge(data.frame(t(par_typ),stringsAsFactors=FALSE), - y=merge(data.frame(t(u_nms),stringsAsFactors=FALSE), - y=data.frame(t(vec_pars)))) -} +toParamVec <- function(pParams) { -#' @rdname params -# @author Carles \Breto -#' @return -#' \code{fromVectorPparams()} returns an object of class \code{list} with the -#' model parameters in list form. -#' @examples -#' fromVectorPparams(toVectorPparams(pparams(prw))) -#' @export -fromVectorPparams <- function(vec_pars) { - # Extract unit, shared, and specific names - sh_nms <- names(vec_pars[,!is.na(vec_pars=="shared")&vec_pars=="shared",drop=FALSE]) - sp_nms <- names(vec_pars[,!is.na(vec_pars=="specific")&vec_pars=="specific",drop=FALSE]) - u_nms <- names(vec_pars)[vec_pars=="unit_name"] - # shared - sh_pars <- if(length(sh_nms)>0) { - sh_pars <- setNames(as.numeric(vec_pars[,paste0(u_nms[1],sh_nms)]),nm=sh_nms) - } else { - numeric(0) - } - # specific - if(length(sp_nms)>0) { - mat_sps <- NULL - for (i.u in seq_len(length(u_nms))) { - mat_sps <- cbind( - mat_sps, - as.numeric(vec_pars[, paste0(u_nms[i.u],sp_nms)]) - ) - } - dimnames(mat_sps) <- list(sp_nms,u_nms) - } else { - mat_sps <- array(numeric(),dim=c(0,length(u_nms)),dimnames=list(NULL,u_nms)) - } - # return - list(shared=sh_pars,specific=mat_sps) + ep <- wQuotes("in ''toParamVec'': ") + if (!is.list(pParams)) stop(ep, 'input must be a list.', call. = FALSE) + if (is.null(pParams$shared) && is.null(pParams$specific)) stop(ep, 'input must have shared or specific components.', call. = FALSE) + + # Create a new list, removing unused components. 
+ value <- list(shared=pParams$shared, specific = pParams$specific) + + c( + value$shared, + setNames( + as.numeric(value$specific), + outer(rownames(value$specific),colnames(value$specific),sprintf,fmt="%s[%s]") + ) + ) } ## Go to list-form pparams from matrix specification @@ -125,7 +73,7 @@ toListPparams <- function( #' \code{toMatrixPparams()} returns an object of class \code{matrix} with the #' model parameters in matrix form. #' @examples -#' toMatrixPparams(pparams(prw)) +#' toMatrixPparams(coef(prw, format = 'list')) #' @export toMatrixPparams <- function(listPparams) { common.params <- listPparams[[which(sapply(listPparams, is.vector))]] diff --git a/R/pfilter.R b/R/pfilter.R index 6252544..949202b 100644 --- a/R/pfilter.R +++ b/R/pfilter.R @@ -92,7 +92,7 @@ setMethod( object <- data # the argument name 'data' is fixed by pomp's generic ep <- wQuotes("in ''pfilter'': ") ## check for params format - if (!missing(params) && is.numeric(params)) params <- pParams(params) + if (!missing(params) && is.numeric(params)) params <- toParamList(params) if (!missing(shared) && !missing(specific) && !missing(params)) stop(ep,wQuotes("specify either ''params'' only, ''params'' and ''shared'' ,", diff --git a/inst/NEWS b/inst/NEWS index 926fb4c..8cb657c 100644 --- a/inst/NEWS +++ b/inst/NEWS @@ -1,5 +1,25 @@ _N_e_w_s _f_o_r _p_a_c_k_a_g_e '_p_a_n_e_l_P_o_m_p' +_C_h_a_n_g_e_s _i_n '_p_a_n_e_l_P_o_m_p' _v_e_r_s_i_o_n _1._3._0: + + • Changed the default ‘cooling.type’ argument in the ‘mif2()’ + function to be ‘"geometric"’ in order to be consistent with + the ‘pomp’ package. + + • Added ‘format’ argument to ‘coef.panelPomp’ method, with + options ‘c("vector", "list")’. This functionality makes the + ‘pparams’ function obsolete, so it has been removed from the + package in this version. + + • Changed the ‘pParams()’ function name to ‘toParamList()’. 
+ + • Modified the ‘toVectorPparams()’ function so that it returns + a vector instead of a ‘data.frame’ object with a single row. + The name of the function was also changed to ‘toParamVec()’, + and it is a near inverse of the ‘toParamList()’ function. + + • Updated unit tests and documentation. + _C_h_a_n_g_e_s _i_n '_p_a_n_e_l_P_o_m_p' _v_e_r_s_i_o_n _1._2._0: • Change the generic ‘unitlogLik’ to ‘unitLogLik’ to match diff --git a/inst/NEWS.Rd b/inst/NEWS.Rd index bd40163..0bdaca3 100644 --- a/inst/NEWS.Rd +++ b/inst/NEWS.Rd @@ -1,5 +1,14 @@ \name{NEWS} \title{News for package `panelPomp'} +\section{Changes in \pkg{panelPomp} version 1.3.0}{ + \itemize{ + \item Changed the default \code{cooling.type} argument in the \code{mif2()} function to be \code{"geometric"} in order to be consistent with the \code{pomp} package. + \item Added \code{format} argument to \code{coef.panelPomp} method, with options \code{c("vector", "list")}. This functionality makes the \code{pparams} function obsolete, so it has been removed from the package in this version. + \item Changed the \code{pParams()} function name to \code{toParamList()}. + \item Modified the \code{toVectorPparams()} function so that it returns a vector instead of a \code{data.frame} object with a single row. The name of the function was also changed to \code{toParamVec()}, and it is a near inverse of the \code{toParamList()} function. + \item Updated unit tests and documentation. + } +} \section{Changes in \pkg{panelPomp} version 1.2.0}{ \itemize{ \item Change the generic \code{unitlogLik} to \code{unitLogLik} to match camel-case style of package. The original function is deprecated in this version and will be removed in future updates.
diff --git a/man/mif2.Rd b/man/mif2.Rd index f23bee2..41e3de3 100644 --- a/man/mif2.Rd +++ b/man/mif2.Rd @@ -17,7 +17,7 @@ start, Np, rw.sd, - cooling.type = c("hyperbolic", "geometric"), + cooling.type = c("geometric", "hyperbolic"), cooling.fraction.50, block = FALSE, verbose = getOption("verbose"), diff --git a/man/panelPomp_methods.Rd b/man/panelPomp_methods.Rd index fb31b24..b1a8bcf 100644 --- a/man/panelPomp_methods.Rd +++ b/man/panelPomp_methods.Rd @@ -7,8 +7,7 @@ \alias{coef<-,panelPomp-method} \alias{length,panelPomp-method} \alias{names,panelPomp-method} -\alias{pparams,panelPomp-method} -\alias{pParams} +\alias{toParamList} \alias{print,panelPomp-method} \alias{show,panelPomp-method} \alias{unit_objects,panelPomp-method} @@ -21,7 +20,7 @@ \alias{shared<-,panelPomp-method} \title{Manipulating \code{panelPomp} objects} \usage{ -\S4method{coef}{panelPomp}(object) +\S4method{coef}{panelPomp}(object, format = c("vector", "list")) \S4method{coef}{panelPomp}(object, ...) <- value @@ -29,9 +28,7 @@ \S4method{names}{panelPomp}(x) -\S4method{pparams}{panelPomp}(object) - -pParams(value) +toParamList(value) \S4method{print}{panelPomp}(x, ...) @@ -57,6 +54,8 @@ pParams(value) \item{object, x}{An object of class \code{panelPomp} or inheriting class \code{panelPomp}.} +\item{format}{the format (data type) of the return value.} + \item{...}{....} \item{value}{value to be assigned.} @@ -64,8 +63,6 @@ pParams(value) \item{start, end}{position in original \code{times(pomp)} at which to start.} \item{i}{unit index (indices) or name (names).} - -\item{format}{the format (data type) of the return value.} } \value{ \code{coef()} returns a \code{numeric} vector. @@ -74,9 +71,7 @@ pParams(value) \code{names()} returns a \code{character} vector. -\pparamsReturn - -\code{pParams()} returns a \code{list} with the model parameters in list form. +\code{toParamList()} returns a \code{list} with the model parameters in list form. 
\unitobjectsReturn @@ -101,8 +96,7 @@ Tools for manipulating \code{panelPomp} objects. \item{coef<-}{Assign coefficients to \code{panelPomp} objects.} \item{length}{Count the number of units in \code{panelPomp} objects.} \item{names}{Get the unit names of \code{panelPomp} objects.} -\item{pparams}{Extracts coefficients from \code{panelPomp} objects in list form.} -\item{pParams}{Converts panel coefficients from vector form to list form.} +\item{toParamList}{Converts panel coefficients from vector form to list form.} \item{window}{Subset \code{panelPomp} objects by changing start time and end time.} \item{\code{[]}}{Take a subset of units.} @@ -119,10 +113,8 @@ coef(prw) <- c(sigmaX=2,coef(prw)[-1]) coef(prw) length(prw) names(prw) -# extract parameters in list form -pparams(prw) # convert vector-form parameters to list-form parameters -pParams(coef(prw)) +toParamList(coef(prw)) ## summaries of objects print(prw) show(prw) diff --git a/man/params.Rd b/man/params.Rd index 26b2c6a..188445c 100644 --- a/man/params.Rd +++ b/man/params.Rd @@ -2,30 +2,22 @@ % Please edit documentation in R/params.R \name{params} \alias{params} -\alias{toVectorPparams} -\alias{fromVectorPparams} +\alias{toParamVec} \alias{toMatrixPparams} -\title{Convert to and from a \code{panelPomp} object \code{pParams} slot format and a one-row \code{data.frame}} +\title{Manipulating \code{panelPomp} object parameter formats} \usage{ -toVectorPparams(pParams) - -fromVectorPparams(vec_pars) +toParamVec(pParams) toMatrixPparams(listPparams) } \arguments{ -\item{pParams}{A list with the format of the \code{pParams} slot of \code{panelPomp} objects.} - -\item{vec_pars}{A one-row \code{data.frame} with format matching that of the -output of \link{toVectorPparams}.} +\item{pParams}{A list with both shared (vector) and unit-specific (matrix) parameters.} \item{listPparams}{PanelPomp parameters in list format} } \value{ -\code{toVectorPparams()} returns an object of class \code{data.frame}. 
- -\code{fromVectorPparams()} returns an object of class \code{list} with the -model parameters in list form. +\code{toParamVec()} returns model parameters in vector form. This function +is a near inverse of \link{toParamList}. \code{toMatrixPparams()} returns an object of class \code{matrix} with the model parameters in matrix form. @@ -35,9 +27,8 @@ These facilitate keeping a record of evaluated log likelihoods. } \examples{ prw <- panelRandomWalk() -toVectorPparams(pparams(prw)) -fromVectorPparams(toVectorPparams(pparams(prw))) -toMatrixPparams(pparams(prw)) +toParamVec(coef(prw, format = 'list')) +toMatrixPparams(coef(prw, format = 'list')) } \author{ Carles \Breto } diff --git a/man/pparams.Rd b/man/pparams.Rd deleted file mode 100644 index 251306a..0000000 --- a/man/pparams.Rd +++ /dev/null @@ -1,42 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/generics.R -\name{pparams} -\alias{pparams} -\title{Extract parameters (coefficients) of a panel model} -\usage{ -pparams(object, ...) -} -\arguments{ -\item{object}{an object for which extraction of panel model parameter -(coefficient) values is meaningful.} - -\item{...}{additional arguments.} -} -\value{ -Parameter (coefficient) values extracted from the panel model -\code{object}. - -\pparamsReturn -} -\description{ -\code{pparams()} is a generic function that extracts parameter -(coefficient) values from objects returned by panel modeling functions. While -the named \code{numeric} vector format is useful and possible via S4 methods -for \code{coef()}, alternative formats capturing the panel structure can be -implemented via \code{pparams()}. -} -\details{ -This is a generic function: methods can be defined for it.
-} -\examples{ -prw <- panelRandomWalk() -# extract parameters in list form -pparams(prw) -} -\seealso{ -\link{panelPomp_methods} -} -\author{ -Carles \Breto -} -\keyword{internal} diff --git a/tests/mif2.R b/tests/mif2.R index 8bb9287..aa3ad04 100644 --- a/tests/mif2.R +++ b/tests/mif2.R @@ -23,11 +23,11 @@ test(wQuotes(ep,"pomp's ''mif2'' error message: in ''mif2'': the following ", "parameter(s), given random walks in ''rw.sd'', are not present ", "in ''params'': ''X.0''. (panelPomp:::mif2.internal)\n"), mif2(panelPomp(unit_objects(ppo)),Np=10,rw.sd=rw_sd(sigmaX=0.05,X.0=0.5), - cooling.fraction.50=0.5,sh=pparams(ppo)$sh)) + cooling.fraction.50=0.5,sh=shared(ppo))) # Testing error message if a parameter is both shared and specific test(wQuotes(ep,"a parameter cannot be both shared and specific!", et), - mif2(panelPomp(unit_objects(ppo),shared=coef(po)),Np=10,sp=pparams(ppo)$sp, + mif2(panelPomp(unit_objects(ppo),shared=coef(po)),Np=10,sp=specific(ppo), rw.sd=rw_sd(sigmaX=0.05,X.0=0.5),cooling.fraction.50=0.5)) ## assign parameters test(# no start (get from object) diff --git a/tests/mif2.Rout.save b/tests/mif2.Rout.save index fc436b1..8d47ce1 100644 --- a/tests/mif2.Rout.save +++ b/tests/mif2.Rout.save @@ -1,7 +1,7 @@ -R version 4.4.0 (2024-04-24) -- "Puppy Cup" +R version 4.4.1 (2024-06-14) -- "Race for Your Life" Copyright (C) 2024 The R Foundation for Statistical Computing -Platform: x86_64-pc-linux-gnu +Platform: x86_64-apple-darwin20 R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. @@ -46,12 +46,12 @@ Type 'q()' to quit R. + "parameter(s), given random walks in ''rw.sd'', are not present ", + "in ''params'': ''X.0''. 
(panelPomp:::mif2.internal)\n"), + mif2(panelPomp(unit_objects(ppo)),Np=10,rw.sd=rw_sd(sigmaX=0.05,X.0=0.5), -+ cooling.fraction.50=0.5,sh=pparams(ppo)$sh)) ++ cooling.fraction.50=0.5,sh=shared(ppo))) [1] TRUE > > # Testing error message if a parameter is both shared and specific > test(wQuotes(ep,"a parameter cannot be both shared and specific!", et), -+ mif2(panelPomp(unit_objects(ppo),shared=coef(po)),Np=10,sp=pparams(ppo)$sp, ++ mif2(panelPomp(unit_objects(ppo),shared=coef(po)),Np=10,sp=specific(ppo), + rw.sd=rw_sd(sigmaX=0.05,X.0=0.5),cooling.fraction.50=0.5)) [1] TRUE > ## assign parameters diff --git a/tests/mif2_intern.R b/tests/mif2_intern.R index 138dc01..069da29 100644 --- a/tests/mif2_intern.R +++ b/tests/mif2_intern.R @@ -7,7 +7,7 @@ test <- function(expr1,expr2,all="TESTS_PASS",env=parent.frame(),...) pg <- panelGompertz(U=3,N=4) gompertz <- as(pg,"list")[[1]] -coef(gompertz) <- c(pparams(pg)$sh, pparams(pg)$sp[,1]) +coef(gompertz) <- c(shared(pg), specific(pg)[,1]) shgomp <- gompertz time(shgomp) <- time(gompertz)[1:2] diff --git a/tests/mif2_intern.Rout.save b/tests/mif2_intern.Rout.save index 37d2dca..ec59393 100644 --- a/tests/mif2_intern.Rout.save +++ b/tests/mif2_intern.Rout.save @@ -1,7 +1,7 @@ -R version 4.4.0 (2024-04-24) -- "Puppy Cup" +R version 4.4.1 (2024-06-14) -- "Race for Your Life" Copyright (C) 2024 The R Foundation for Statistical Computing -Platform: x86_64-pc-linux-gnu +Platform: x86_64-apple-darwin20 R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. @@ -26,7 +26,7 @@ Type 'q()' to quit R. 
> > pg <- panelGompertz(U=3,N=4) > gompertz <- as(pg,"list")[[1]] -> coef(gompertz) <- c(pparams(pg)$sh, pparams(pg)$sp[,1]) +> coef(gompertz) <- c(shared(pg), specific(pg)[,1]) > > shgomp <- gompertz > time(shgomp) <- time(gompertz)[1:2] diff --git a/tests/panelPomp.R b/tests/panelPomp.R index c28b810..d6ab763 100644 --- a/tests/panelPomp.R +++ b/tests/panelPomp.R @@ -8,7 +8,7 @@ test <- function(expr1,expr2,all="TESTS_PASS",env=parent.frame(),...) pg <- panelGompertz(U=5,N=3) pg <- panelPomp(pg[1:3]) pgl <- as(pg,"list") -g <- pgl[[1]]; coef(g) <- c(pparams(pg)$sh, pparams(pg)$sp[,1]) +g <- pgl[[1]]; coef(g) <- c(shared(pg), specific(pg)[,1]) coef(g) coef(pg) coef(panelPomp(pg,shared=NULL)) @@ -34,7 +34,7 @@ try(panelPomp(pgl[[1]])) ppo <- panelRandomWalk(U=2,N=5) pos <- as(ppo,"list") -pPs <- pparams(ppo) +pPs <- coef(ppo, format = 'list') all_sh <- c(pPs$sh,get_col(pPs$sp,col=1,rows=seq_along(dim(pPs$sp)[1]))) noparams <- lapply(unit_objects(ppo),pomp,params=numeric(0)) diff --git a/tests/panelPomp.Rout.save b/tests/panelPomp.Rout.save index c0e851f..9de5547 100644 --- a/tests/panelPomp.Rout.save +++ b/tests/panelPomp.Rout.save @@ -1,7 +1,7 @@ -R version 4.4.0 (2024-04-24) -- "Puppy Cup" +R version 4.4.1 (2024-06-14) -- "Race for Your Life" Copyright (C) 2024 The R Foundation for Statistical Computing -Platform: x86_64-pc-linux-gnu +Platform: x86_64-apple-darwin20 R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. @@ -27,7 +27,7 @@ Type 'q()' to quit R. 
> pg <- panelGompertz(U=5,N=3) > pg <- panelPomp(pg[1:3]) > pgl <- as(pg,"list") -> g <- pgl[[1]]; coef(g) <- c(pparams(pg)$sh, pparams(pg)$sp[,1]) +> g <- pgl[[1]]; coef(g) <- c(shared(pg), specific(pg)[,1]) > coef(g) r sigma K tau X.0 0.1 0.1 1.0 0.1 1.0 @@ -94,7 +94,7 @@ Error : in ‘panelPomp’: ‘object’ must be either a ‘panelPomp’ object > > ppo <- panelRandomWalk(U=2,N=5) > pos <- as(ppo,"list") -> pPs <- pparams(ppo) +> pPs <- coef(ppo, format = 'list') > all_sh <- c(pPs$sh,get_col(pPs$sp,col=1,rows=seq_along(dim(pPs$sp)[1]))) > > noparams <- lapply(unit_objects(ppo),pomp,params=numeric(0)) diff --git a/tests/panelPomp_methods.R b/tests/panelPomp_methods.R index fdee359..df720cb 100644 --- a/tests/panelPomp_methods.R +++ b/tests/panelPomp_methods.R @@ -14,7 +14,7 @@ ppo <- panelPomp(unit_objects(ppo),shared=pP2$shared,specific=pP2$specific) # other definitions from old test file pg <- panelGompertz(U=3,N=5) pgl <- as(pg,"list") -g <- pgl[[1]]; coef(g) <- c(pparams(pg)$sh, pparams(pg)$sp[,1]) +g <- pgl[[1]]; coef(g) <- c(shared(pg), specific(pg)[,1]) pp <- panelPomp(list(g,g),shared=pg@shared, specific=pg@specific[,1:2]) @@ -49,24 +49,30 @@ test(coef(ppo) <- ppo@shared,err) test(length(ppo),2L) ## test names,panelPomp-method test(names(ppo),c("rw1","rw2")) -## test pparams,panelPomp-method -test(pparams(ppo),list(shared=ppo@shared,specific=ppo@specific)) +## test coef(..., format = 'list'),panelPomp-method +test(coef(ppo, format = 'list'),list(shared=ppo@shared,specific=ppo@specific)) ## test pParams function ## all sh -test(pParams(coef(ppo)[grep("^.+\\[.+?\\]$",names(coef(ppo)),perl=TRUE, +test(toParamList(coef(ppo)[grep("^.+\\[.+?\\]$",names(coef(ppo)),perl=TRUE, value=TRUE,invert=TRUE)]), list(shared=ppo@shared,specific=array(numeric(0),dim=c(0,0)))) ## all sp test(list(shared=numeric(0),specific=ppo@specific), - pParams(coef(ppo)[grep("^.+\\[.+?\\]$",names(coef(ppo)),perl=TRUE, + toParamList(coef(ppo)[grep("^.+\\[.+?\\]$",names(coef(ppo)),perl=TRUE, 
value=TRUE)])) ## both sh & sp -test(pParams(coef(ppo)),list(shared=ppo@shared,specific=ppo@specific)) +test(toParamList(coef(ppo)),list(shared=ppo@shared,specific=ppo@specific)) -# Test error message if pParams used on data.frame +# Test error message if toParamList used on data.frame / list test( - wQuotes("Error : in ''pParams'': ", "input must be a vector.\n"), - pParams(data.frame('par1' = 1, 'par2' = 2)) + wQuotes("Error : in ''toParamList'': ", "input is already a list.\n"), + toParamList(data.frame('par1' = 1, 'par2' = 2)) +) + +# Test error message if toParamList used on matrix +test( + wQuotes("Error : in ''toParamList'': ", "input must be a vector.\n"), + toParamList(matrix(c(1, 2, 3))) ) ## test unit_objects,panelPomp-method diff --git a/tests/panelPomp_methods.Rout.save b/tests/panelPomp_methods.Rout.save index 768b627..02006d1 100644 --- a/tests/panelPomp_methods.Rout.save +++ b/tests/panelPomp_methods.Rout.save @@ -1,7 +1,7 @@ -R version 4.4.0 (2024-04-24) -- "Puppy Cup" +R version 4.4.1 (2024-06-14) -- "Race for Your Life" Copyright (C) 2024 The R Foundation for Statistical Computing -Platform: x86_64-pc-linux-gnu +Platform: x86_64-apple-darwin20 R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. @@ -33,7 +33,7 @@ Type 'q()' to quit R. > # other definitions from old test file > pg <- panelGompertz(U=3,N=5) > pgl <- as(pg,"list") -> g <- pgl[[1]]; coef(g) <- c(pparams(pg)$sh, pparams(pg)$sp[,1]) +> g <- pgl[[1]]; coef(g) <- c(shared(pg), specific(pg)[,1]) > pp <- panelPomp(list(g,g),shared=pg@shared, + specific=pg@specific[,1:2]) > @@ -80,28 +80,35 @@ Type 'q()' to quit R. 
> ## test names,panelPomp-method > test(names(ppo),c("rw1","rw2")) [1] TRUE -> ## test pparams,panelPomp-method -> test(pparams(ppo),list(shared=ppo@shared,specific=ppo@specific)) +> ## test coef(..., format = 'list'),panelPomp-method +> test(coef(ppo, format = 'list'),list(shared=ppo@shared,specific=ppo@specific)) [1] TRUE > ## test pParams function > ## all sh -> test(pParams(coef(ppo)[grep("^.+\\[.+?\\]$",names(coef(ppo)),perl=TRUE, +> test(toParamList(coef(ppo)[grep("^.+\\[.+?\\]$",names(coef(ppo)),perl=TRUE, + value=TRUE,invert=TRUE)]), + list(shared=ppo@shared,specific=array(numeric(0),dim=c(0,0)))) [1] TRUE > ## all sp > test(list(shared=numeric(0),specific=ppo@specific), -+ pParams(coef(ppo)[grep("^.+\\[.+?\\]$",names(coef(ppo)),perl=TRUE, ++ toParamList(coef(ppo)[grep("^.+\\[.+?\\]$",names(coef(ppo)),perl=TRUE, + value=TRUE)])) [1] TRUE > ## both sh & sp -> test(pParams(coef(ppo)),list(shared=ppo@shared,specific=ppo@specific)) +> test(toParamList(coef(ppo)),list(shared=ppo@shared,specific=ppo@specific)) [1] TRUE > -> # Test error message if pParams used on data.frame +> # Test error message if toParamList used on data.frame / list > test( -+ wQuotes("Error : in ''pParams'': ", "input must be a vector.\n"), -+ pParams(data.frame('par1' = 1, 'par2' = 2)) ++ wQuotes("Error : in ''toParamList'': ", "input is already a list.\n"), ++ toParamList(data.frame('par1' = 1, 'par2' = 2)) ++ ) +[1] TRUE +> +> # Test error message if toParamList used on matrix +> test( ++ wQuotes("Error : in ''toParamList'': ", "input must be a vector.\n"), ++ toParamList(matrix(c(1, 2, 3))) + ) [1] TRUE > diff --git a/tests/params.R b/tests/params.R index 14305c1..aff8a33 100644 --- a/tests/params.R +++ b/tests/params.R @@ -11,20 +11,27 @@ u <- 5 # at least 1 sp <- matrix( as.numeric(paste0(rep(seq(3,u+2),each=2),rep(c(".1",".2"),times=u))), nrow=2, - dimnames=list(c("spec.1","spec.2"),c(paste0("unit.",1:u))) + dimnames=list(param = c("spec.1","spec.2"),unit = c(paste0("unit.",1:u))) +) 
+ +out_null_sp <- toParamList( + toParamVec( + list(shared=sh,specific=array( + numeric(0),dim=c(0,ncol(sp)),dimnames=list(NULL,colnames(sp))) + ) + ) ) ## recovec pParams (shared parameters only) -test(list(shared=sh,specific=array( - numeric(0),dim=c(0,ncol(sp)),dimnames=list(NULL,colnames(sp)))), - fromVectorPparams(toVectorPparams(list(shared=sh,specific=array( - numeric(0),dim=c(0,ncol(sp)),dimnames=list(NULL,colnames(sp))))))) +test(all(dim(out_null_sp$specific) == c(0, 0))) +test(out_null_sp$shared, sh) + ## recovec pParams (specific parameters only) test(list(shared=numeric(0),specific=sp), - fromVectorPparams(toVectorPparams(list(shared=numeric(0),specific=sp)))) + toParamList(toParamVec(list(shared=numeric(0),specific=sp)))) ## recovec pParams (both) test(list(shared=sh,specific=sp), - fromVectorPparams(toVectorPparams(list(shared=sh,specific=sp)))) + toParamList(toParamVec(list(shared=sh,specific=sp)))) ## tolspPs and toMatrixPparams work (shared parameters only) list(shared=sh,specific=array(numeric(0),dim=c(0,ncol(sp)), @@ -57,6 +64,8 @@ res <- panelPomp:::toListPparams( vector.name.in.listPparams=vector.name.in.listPparams, matrix.name.in.listPparams=matrix.name.in.listPparams ) +dimnames(res$specific) <- list(param=rownames(res$specific), unit=colnames(res$specific)) + test(res,listPparams) ## tolistPparams and toMatrixPparams work (both) @@ -75,6 +84,21 @@ res <- panelPomp:::toListPparams( vector.name.in.listPparams=vector.name.in.listPparams, matrix.name.in.listPparams=matrix.name.in.listPparams ) +dimnames(res$specific) <- list(param=rownames(res$specific), unit=colnames(res$specific)) + + +ep <- "Error : in ''toParamVec'': " +## test checks for missing arguments in panelPomp function +test( + wQuotes(ep,"input must be a list.\n"), + toParamVec(c(1, 2, 3)) +) + +test( + wQuotes(ep, 'input must have shared or specific components.\n'), + toParamVec(list(a=c(1, 2, 3))) +) + test(res,listPparams) ## check whether all tests passed diff --git 
a/tests/params.Rout.save b/tests/params.Rout.save index 0f58b72..db810ee 100644 --- a/tests/params.Rout.save +++ b/tests/params.Rout.save @@ -1,5 +1,5 @@ -R version 4.4.0 (2024-04-24) -- "Puppy Cup" +R version 4.4.1 (2024-06-14) -- "Race for Your Life" Copyright (C) 2024 The R Foundation for Statistical Computing Platform: x86_64-apple-darwin20 @@ -30,22 +30,30 @@ Type 'q()' to quit R. > sp <- matrix( + as.numeric(paste0(rep(seq(3,u+2),each=2),rep(c(".1",".2"),times=u))), + nrow=2, -+ dimnames=list(c("spec.1","spec.2"),c(paste0("unit.",1:u))) ++ dimnames=list(param = c("spec.1","spec.2"),unit = c(paste0("unit.",1:u))) ++ ) +> +> out_null_sp <- toParamList( ++ toParamVec( ++ list(shared=sh,specific=array( ++ numeric(0),dim=c(0,ncol(sp)),dimnames=list(NULL,colnames(sp))) ++ ) ++ ) + ) > > ## recovec pParams (shared parameters only) -> test(list(shared=sh,specific=array( -+ numeric(0),dim=c(0,ncol(sp)),dimnames=list(NULL,colnames(sp)))), -+ fromVectorPparams(toVectorPparams(list(shared=sh,specific=array( -+ numeric(0),dim=c(0,ncol(sp)),dimnames=list(NULL,colnames(sp))))))) +> test(all(dim(out_null_sp$specific) == c(0, 0))) [1] TRUE +> test(out_null_sp$shared, sh) +[1] TRUE +> > ## recovec pParams (specific parameters only) > test(list(shared=numeric(0),specific=sp), -+ fromVectorPparams(toVectorPparams(list(shared=numeric(0),specific=sp)))) ++ toParamList(toParamVec(list(shared=numeric(0),specific=sp)))) [1] TRUE > ## recovec pParams (both) > test(list(shared=sh,specific=sp), -+ fromVectorPparams(toVectorPparams(list(shared=sh,specific=sp)))) ++ toParamList(toParamVec(list(shared=sh,specific=sp)))) [1] TRUE > > ## tolspPs and toMatrixPparams work (shared parameters only) @@ -80,6 +88,8 @@ Type 'q()' to quit R. 
+ vector.name.in.listPparams=vector.name.in.listPparams, + matrix.name.in.listPparams=matrix.name.in.listPparams + ) +> dimnames(res$specific) <- list(param=rownames(res$specific), unit=colnames(res$specific)) +> > test(res,listPparams) [1] TRUE > @@ -99,6 +109,23 @@ Type 'q()' to quit R. + vector.name.in.listPparams=vector.name.in.listPparams, + matrix.name.in.listPparams=matrix.name.in.listPparams + ) +> dimnames(res$specific) <- list(param=rownames(res$specific), unit=colnames(res$specific)) +> +> +> ep <- "Error : in ''toParamVec'': " +> ## test checks for missing arguments in panelPomp function +> test( ++ wQuotes(ep,"input must be a list.\n"), ++ toParamVec(c(1, 2, 3)) ++ ) +[1] TRUE +> +> test( ++ wQuotes(ep, 'input must have shared or specific components.\n'), ++ toParamVec(list(a=c(1, 2, 3))) ++ ) +[1] TRUE +> > test(res,listPparams) [1] TRUE > diff --git a/tests/pfilter.R b/tests/pfilter.R index b1b7ecd..88c842d 100644 --- a/tests/pfilter.R +++ b/tests/pfilter.R @@ -8,7 +8,7 @@ test <- function(expr1,expr2,all="TESTS_PASS",env=parent.frame(),...) 
ppo <- panelRandomWalk(U=1,N=7) pos <- as(ppo,"list") po <- pos[[1]] -pPs <- pparams(ppo) +pPs <- coef(ppo, format = 'list') ep <- wQuotes("Error : in ''pfilter'': ") @@ -18,7 +18,7 @@ test(wQuotes(ep,"''data'' is a required argument.\n"), pfilter(params=coef(ppo),Np=10)) test(wQuotes(ep,"names of ''shared'' must match those of ", "''object@shared''.\n"), - pfilter(panelPomp(unit_objects(ppo)),sh=pparams(ppo)$sh,Np=10)) + pfilter(panelPomp(unit_objects(ppo)),sh=shared(ppo),Np=10)) test(wQuotes(ep,"Missing ''Np'' argument.\n"),pfilter(ppo)) # Testing error message if params argument is list without shared / specific elements @@ -75,7 +75,7 @@ set.seed(21125715L) ppf_<-pfilter(ppo,params=list(shared=ppo@shared,specific=ppo@specific),Np=10) test(logLik(ppf),logLik(ppf_)) numeric_names <- setNames(rep(1,3),c(names(ppo@shared),"X.0[rw1]")) -test(pPs,pParams(numeric_names)) +test(pPs,toParamList(numeric_names)) set.seed(21125715L) ppf__<-pfilter(ppo,params=numeric_names,Np=10) test(logLik(ppf),logLik(ppf__)) @@ -102,15 +102,15 @@ test(names(as(ppf,"data.frame")),c("t", "Y", "ess", "cond.logLik", "unit")) g <- panelGompertz(U=10,N=3) ## check that previously broken code runs without error g0 <- pfilter(g, Np=10, - shared=pParams(coef(g))$shared, - specific=pParams(coef(g))$specific) + shared=shared(g), + specific=specific(g)) ## a longer stronger test long_test <- FALSE if(long_test){ set.seed(12323218) g1 <- pfilter(g, Np=10000, - shared=pParams(coef(g))$shared, - specific=pParams(coef(g))$specific) + shared=shared(g), + specific=specific(g)) g2 <- pfilter(g, Np=10000) test(abs(logLik(p1)-logLik(p2))<0.2, TRUE) } diff --git a/tests/pfilter.Rout.save b/tests/pfilter.Rout.save index aa0f759..fb6e759 100644 --- a/tests/pfilter.Rout.save +++ b/tests/pfilter.Rout.save @@ -1,7 +1,7 @@ -R version 4.4.0 (2024-04-24) -- "Puppy Cup" +R version 4.4.1 (2024-06-14) -- "Race for Your Life" Copyright (C) 2024 The R Foundation for Statistical Computing -Platform: x86_64-pc-linux-gnu 
+Platform: x86_64-apple-darwin20 R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. @@ -27,7 +27,7 @@ Type 'q()' to quit R. > ppo <- panelRandomWalk(U=1,N=7) > pos <- as(ppo,"list") > po <- pos[[1]] -> pPs <- pparams(ppo) +> pPs <- coef(ppo, format = 'list') > > ep <- wQuotes("Error : in ''pfilter'': ") > @@ -39,7 +39,7 @@ Type 'q()' to quit R. [1] TRUE > test(wQuotes(ep,"names of ''shared'' must match those of ", + "''object@shared''.\n"), -+ pfilter(panelPomp(unit_objects(ppo)),sh=pparams(ppo)$sh,Np=10)) ++ pfilter(panelPomp(unit_objects(ppo)),sh=shared(ppo),Np=10)) [1] TRUE > test(wQuotes(ep,"Missing ''Np'' argument.\n"),pfilter(ppo)) [1] TRUE @@ -110,7 +110,7 @@ Type 'q()' to quit R. > test(logLik(ppf),logLik(ppf_)) [1] TRUE > numeric_names <- setNames(rep(1,3),c(names(ppo@shared),"X.0[rw1]")) -> test(pPs,pParams(numeric_names)) +> test(pPs,toParamList(numeric_names)) [1] TRUE > set.seed(21125715L) > ppf__<-pfilter(ppo,params=numeric_names,Np=10) @@ -144,15 +144,15 @@ Type 'q()' to quit R. 
> g <- panelGompertz(U=10,N=3) > ## check that previously broken code runs without error > g0 <- pfilter(g, Np=10, -+ shared=pParams(coef(g))$shared, -+ specific=pParams(coef(g))$specific) ++ shared=shared(g), ++ specific=specific(g)) > ## a longer stronger test > long_test <- FALSE > if(long_test){ + set.seed(12323218) + g1 <- pfilter(g, Np=10000, -+ shared=pParams(coef(g))$shared, -+ specific=pParams(coef(g))$specific) ++ shared=shared(g), ++ specific=specific(g)) + g2 <- pfilter(g, Np=10000) + test(abs(logLik(p1)-logLik(p2))<0.2, TRUE) + } diff --git a/vignettes/articles/package_tutorial/bg.bib b/vignettes/articles/package_tutorial/bg.bib new file mode 100644 index 0000000..2a00f00 --- /dev/null +++ b/vignettes/articles/package_tutorial/bg.bib @@ -0,0 +1,1073 @@ +@string{AOAS="Annals of Applied Statistics"} +@string{AOAP="Annals of Applied Probability"} +@string{AOS="Annals of Statistics"} +@string{AICEJ="American Institute of Chemical Engineers Journal"} +@string{BWHO="Bulletin of the World Health Organization"} +@string{JRSSB="Journal of the Royal Statistical Society, Series B (Statistical Methodology)"} +@string{JRSSC="Journal of the Royal Statistical Society: Series C (Applied Statistics)"} +@string{JAMA="Journal of the American Medical Association"} +@string{JASA="Journal of the American Statistical Association"} +@string{JAP="Journal of Applied Probability"} +@string{JCGS="Journal of Computational and Graphical Statistics"} +@string{JN="Journal of Neuroscience"} +@string{AJTMH="American Journal of Tropical Medicine and Hygiene"} +@string{JNE="Journal of Neural Engineering"} +@string{CH="Chapman and Hall"} +@string{OUP="Oxford University Press"} +@string{PUP="Princeton University Press"} +@string{CUP="Cambridge University Press"} +@string{CMB="Clinical Microbiology Reviews"} +@string{IJE="International Journal of Epidemiology"}, +@string{JBES="Journal of Business and Economic Statistics"} +@string{JRSI="Journal of the Royal Society Interface"} 
+@string{TSP="IEEE Transactions on Signal Processing"} +@string{MBE="Molecular Biology and Evolution"} +@string{MWR="Monthly Weather Review"} +@string{PLoS-Med="PLoS Medicine"} +@string{PLoS-Bio="PLoS Biology"} +@string{PLoS-CompBio="PLoS Computational Biology"} +@string{PLoS-Path="PLoS Pathogens"} +@string{PLMS="Proceedings of the London Mathematical Society"} +@string{PRSLA="Proceedings of the Royal Society of London, Series A"} +@string{PRSLB="Proceedings of the Royal Society of London, Series B"} +@string{PRSB="Proceedings of the Royal Society of London, Series B"} +@string{AJE="American Journal of Epidemiology"} +@string{AmNat="American Naturalist"} +@string{PNAS="Proceedings of the National Academy of Sciences of the USA"} +@string{AnnStat="Annals of Statistics"} +@string{AppStat="Applied Statistics"} +@string{JMB="Journal of Mathematical Biology"} +@string{SC="Statistics and Computing"} +@string{SM="Statistics in Medicine"} +@string{SPL="Statistics and Probability Letters"} +@string{SS="Statistica Sinica"} +@string{StatSci={Statistical Science}} +@string{AnnMathStat="Annals of Mathematical Statistics"} +@string{JC="Journal of Climate"} +@string{AIChEJ="American Institute of Chemical Engineers Journal"} +@string{TRSTMH="Transactions of the Royal Society of Tropical Medicine and Hygiene"} + + +AAAAAAAAAAAAAAA + +@article {andrieu10, + AUTHOR = {Andrieu, Christophe and Doucet, Arnaud and Holenstein, Roman}, + TITLE = {Particle {M}arkov chain {M}onte {C}arlo methods}, + XJOURNAL = {J. R. Stat. Soc. Ser. B Stat. Methodol.}, + Journal = JRSSB, + FJOURNAL = {Journal of the Royal Statistical Society. Series B. 
+ Statistical Methodology}, + VOLUME = {72}, + YEAR = {2010}, + NUMBER = {3}, + PAGES = {269--342}, + ISSN = {1369-7412}, + DOI = {10.1111/j.1467-9868.2009.00736.x}, +} + +@article{asfaw21, + title={Partially observed Markov processes with spatial structure via the R package spatPomp}, + author={Asfaw, Kidus and Park, Joonha and King, Aaron A and Ionides, Edward L}, + journal={arXiv preprint arXiv:2101.01157}, + year={2021} +} + +BBBBBBBBBBBBBBB + +@book(barndorff94, +author = "Barndorff-Nielsen, O. E. and Cox, D. R.", +year = 1994, +title = "Inference and Asymptotics", +publisher = CH, +address = "London" +) + +@book{bartolucci12, + title={Latent {M}arkov models for longitudinal data}, + author={Bartolucci, Francesco and Farcomeni, Alessio and Pennoni, Fulvia}, + year={2012}, + publisher={CRC Press} +} + +@article{barton07,doi = {10.1093/toxsci/kfm100}, +author = {Barton, Hugh A. and Chiu, Weihsueh A. and Woodrow Setzer, R. and Andersen, Melvin E. and Bailer, A. John and Bois, Frédéric Y. and DeWoskin, Robert S. and Hays, Sean and Johanson, Gunnar and Jones, Nancy and Loizou, George and MacPhail, Robert C. and Portier, Christopher J. and Spendiff, Martin and Tan, Yu-Mei}, +title = {Characterizing Uncertainty and Variability in Physiologically Based Pharmacokinetic Models: State of the Science and Needs for Research and Implementation}, +volume = {99}, +number = {2}, +pages = {395-402}, +year = {2007}, +journal = {Toxicological Sciences} +} + +@Incollection{bengtsson08, Doi={10.1007/978-3-642-01094-1}, +Author = "Bengtsson, T. and Bickel, P. and Li, B.", +Year = 2008, +Title = "Curse-of-dimensionality revisited: Collapse of the particle filter in very large scale systems", +Booktitle="Probability and Statistics: Essays in Honor of {D}avid {A}. {F}reedman", +Editor={Speed, T. 
and Nolan, D.}, +Pages = "316--334", +Publisher="Institute of Mathematical Statistics", +Address="Beachwood, OH" +} + + +@article(bjornstad01, doi="10.1126/science.1062226", +author = "Bj{\o}rnstad, O. N. and Grenfell, B. T.", +year = 2001, +title = "Noisy Clockwork: Time Series Analysis of Population Fluctuations in Animals", +journal = "Science", +volume = 293, +pages = "638--643" +) + +@article{bjornstad02, + author = {Bj{\o}rnstad, Ottar N. and Finkenst\"{a}dt, Barbel F. and Grenfell, Bryan T.}, + journal = {Ecological Monographs}, + xmonth = {may}, + xnumber = {2}, + pages = {169--184}, + publisher = {Ecological Society of America}, + title = {Dynamics of Measles Epidemics: Estimating Scaling of Transmission Rates Using a Time Series {SIR} Model}, + xurl = {http://links.jstor.org/sici?sici=0012-9615%28200205%2972%3A2%3C169%3ADOMEES%3E2.0.CO%3B2-S}, + volume = {72}, + year = {2002}, +} + + @article{blower95, url = {http://www.jstor.org/stable/3702385}, + title = {An Analysis of the Process of Human Immunodeficiency Virus Sexual Risk Behavior Change}, + author = {Blower, Sally M. and Griensven, Gotfried J. P. van and Kaplan, Edward H.}, + journal = {Epidemiology}, + volume = {6}, + number = {3}, + pages = {238--242}, + year = {1995}, + } + +@article{bossy04, + title={A symmetrized {E}uler scheme for an efficient approximation of reflected diffusions}, + author={Bossy, M. and Gobet, E. and Talay, D.}, + journal=JAP, + volume={41}, + number={3}, + pages={877--889}, + year={2004}, +} + +@Article{breto09, doi={10.1214/08-AOAS201}, +text={Breto, C., D. He, E. L. Ionides and A. A. King (2009). Time series analysis via mechanistic models. Annals of Applied Statistics 3:319--348.}, +Author = {Bret\'{o}, C. and He, D. and Ionides, E. L. and King, A. 
A.}, +Year = {2009}, +Title = {Time Series Analysis via Mechanistic Models}, +Journal = {Annals of Applied Statistics}, +Volume = {3}, +Pages = {319--348} +} + +@article(breto11, doi={10.1016/j.spa.2011.07.005},text={Breto, C. and Ionides, E.L. (2011) Compound Markov counting processes and their applications to modeling infinitesimally over-dispersed systems. Stochastic Processes and their Applications 121:2571-2591.}, +author = "Bret\'{o}, C. and Ionides, E. L.", +year = 2011, +title = "Compound {M}arkov counting processes and their applications to modeling infinitesimally over-dispersed systems", +journal = "Stochastic Processes and their Applications", +volume = "121", +pages = "2571--2591" +) + +@Article{breto14,doi = "10.1016/j.spl.2014.04.003",text={Breto, C. (2014). On idiosyncratic stochasticity of financial leverage effects. Statistics & Probability Letters 91:20-26.}, +Title = "On idiosyncratic stochasticity of financial leverage effects", +Journal = "Statistics \& Probability Letters ", +Volume = "91", +Pages = "20--26", +Year = "2014", +issn = "0167-7152", +xurl = "http://www.sciencedirect.com/science/article/pii/S0167715214001345", +Author = "Carles Bret\'{o}", +} + +@article{breto18, + year={2018}, + doi = {10.1214/17-STS636}, + title={Modeling and inference for infectious disease dynamics: a likelihood-based approach}, + author={Bret{\'o}, Carles}, + journal={Statistical Science}, + xjournal=StatSci, + number = {1}, + pages = {57--69}, + volume = {33} +} + +@article{breto19, doi={10.1080/01621459.2019.1604367}, + title={Panel data analysis via mechanistic models}, + author={Bret{\'o}, Carles and Ionides, Edward L and King, Aaron A}, + journal={Journal of the American Statistical Association, pre-published online}, + xvolume={pre-published online}, + pages={1--35}, + year={2019}, + publisher={Taylor \& Francis} +} + + +CCCCCCCCCCCCCC + +@article(cauchemez04, doi="10.1002/sim.1912", +author = "Cauchemez, S. and Carrat, F. and Viboud, C. 
and Valleron, A. J. and Boelle, P. Y.", +year = 2004, +title = "A {B}ayesian {MCMC} approach to study transmission of influenza: Application to household longitudinal data", +journal = SM, +volume = 23, +pages = "3469--3487" +) + +@article {cauchemez07-jasa, doi = "doi:10.1198/016214506000000230", + author = "Cauchemez, Simon and Temime, Laura and Guillemot, Didier and Varon, Emmanuelle and Valleron, Alain-Jacques and Thomas, Guy and Boelle, Pierre-Yves", + title = "Investigating Heterogeneity in Pneumococcal Transmission: A {B}ayesian {MCMC} Approach Applied to a Follow-up of Schools", + journal = JASA, + volume = 101, + year = 2006, + pages = "946--958" +} + +@Book{chambers98, + title = {Programming with Data}, + author = {John Chambers}, + publisher = {Springer-Verlag}, + year = {1998}, + address = {New York} +} + +@article{chen16, + title={Analyzing single-molecule protein transportation experiments via hierarchical hidden {M}arkov models}, + author={Chen, Yang and Shen, Kuang and Shan, Shu-Ou and Kou, Samuel}, + journal=JASA, + xjournal={Journal of the American Statistical Association}, + volume={111}, + number={515}, + pages={951--966}, + year={2016}, + publisher={Taylor \& Francis} +} + + +@article{croissant08, + title={Panel data econometrics in {R}: The plm package}, + author={Croissant, Yves and Millo, Giovanni and others}, + journal={Journal of Statistical Software}, + doi={10.18637/jss.v027.i02}, + volume={27}, + number={2}, + pages={1--43}, + year={2008} +} + +DDDDDDDDDDDDDD + +@article{dai16, + title={Using Convolution to Estimate the Score Function for Intractable State-Transition Models}, + author={Dai, Liang and Sch{\"o}n, Thomas B}, + journal={IEEE Signal Processing Letters}, + volume={23}, + number={4}, + pages={498--501}, + year={2016}, + publisher={IEEE} +} + +@book{delmoral04, + author = {Del Moral, P.}, + year = {2004}, + title = {{F}eynman-{K}ac Formulae: Genealogical and Interacting Particle Systems with Applications}, + publisher = 
{Springer}, + address = {New York} +} + +@article{delmoral01, +title = "On the stability of interacting processes with applications to filtering and genetic algorithms", +journal = "Annales de l'Institut Henri Poincar\'{e} (B) Probability and Statistics", +volume = "37", +number = "2", +pages = "155--194", +year = "2001", +issn = "0246-0203", +doi = "https://doi.org/10.1016/S0246-0203(00)01064-5", +author = "Del Moral, Pierre and Guionnet, Alice" +} + +@article{dempster77, + title={Maximum likelihood from incomplete data via the {EM} algorithm}, + author={Dempster, Arthur P and Laird, Nan M and Rubin, Donald B}, + xjournal={Journal of the royal statistical society. Series B (methodological)}, + journal=JRSSB, + pages={1--38}, + year={1977} +} + +@article{dobson14, + title={Mathematical models for emerging disease}, + author={Dobson, Andy}, + journal={Science}, + volume={346}, + number={6215}, + pages={1294--1295}, + year={2014}, + publisher={American Association for the Advancement of Science} +} + +@article{domeyer22, + title = {Driver-pedestrian perceptual models demonstrate coupling: implications for vehicle automation}, + author = {Domeyer, Joshua E and Lee, John D and Toyoda, Heishiro and Mehler, Bruce and Reimer, Bryan}, + journal = {IEEE Transactions on Human-Machine Systems}, + volume = {52}, + number = {4}, + pages = {557--566}, + year = {2022}, + publisher = {IEEE}, + link = {https://doi.org/10.1109/THMS.2022.3158201} +} + +@article{donnet13, + title={A review on estimation of stochastic differential equations for pharmacokinetic/pharmacodynamic models}, + author={Donnet, Sophie and Samson, Adeline}, + journal={Advanced Drug Delivery Reviews}, + volume={65}, + number={7}, + pages={929--939}, + year={2013}, + publisher={Elsevier} +} + +@book{douc14, + title={Nonlinear time series: Theory, methods and applications with {R} examples}, + author={Douc, Randal and Moulines, Eric and Stoffer, David}, + year={2014}, + publisher={CRC Press} +} + 
+@Article{doucet15, +xUrl={http://arxiv.org/abs/1304.5768}, +Author = {Doucet, A. and Jacob, P. E. and Rubenthaler, S.}, +Year = {2015}, +Title = {Derivative-Free Estimation of the Score Vector and Observed Information Matrix with Application to State-Space Models}, +Journal = {Arxiv:1304.5768v3}, +Volume = {}, +xPages = {http://arxiv.org/abs/1304.5768} +} + +@Article{doucet15pmcmc, + doi={10.1093/biomet/asu075}, + rmdCite={@doucet15}, + Title={Efficient implementation of {M}arkov chain {M}onte {C}arlo when using an unbiased likelihood estimator}, + Author={Doucet, Arnaud and Pitt, M. K. and Deligiannidis, George and Kohn, Robert}, + Journal={Biometrika}, + Volume={102}, + Pages={295-313}, + Year={2015}, + Publisher={Biometrika Trust} +} + +@article{dunson07-smmr,doi = {10.1177/0962280206075309}, +author = {Dunson, David B.}, +title = {Bayesian methods for latent trait modelling of longitudinal data}, +volume = {16}, +number = {5}, +pages = {399-415}, +year = {2007}, +journal = {Statistical Methods in Medical Research} +} + +EEEEEEEEEEEEEEE + +FFFFFFFFFFFFFFF + +@Article{fasiolo16, +Author = {Fasiolo, Matteo and Pya, Natalya and Wood, Simon N.}, +Doi = {10.1214/15-STS534}, +Fjournal = {Statistical Science}, +Journal = StatSci, +Month = {02}, +Number = {1}, +Pages = {96--118}, +Publisher = {The Institute of Mathematical Statistics}, +Title = {A Comparison of Inferential Methods for Highly Nonlinear State Space Models in Ecology and Epidemiology}, +Volume = {31}, +Year = {2016} +} + +@article{fitzjohn20, + title={Reproducible parallel inference and simulation of stochastic state space models using odin, dust, and mcstate}, + author={FitzJohn, Richard G and Knock, Edward S and Whittles, Lilith K and Perez-Guzman, Pablo N and Bhatia, Sangeeta and Guntoro, Fernando and Watson, Oliver J and Whittaker, Charles and Ferguson, Neil M and Cori, Anne and others}, + journal={Wellcome Open Research}, + volume={5}, + year={2020}, + publisher={The Wellcome Trust} +} + 
+GGGGGGGGGGGGG + +@article{gelfand90, + title={Sampling-based approaches to calculating marginal densities}, + author={Gelfand, Alan E and Smith, Adrian FM}, + xjournal={Journal of the American Statistical Association}, + journal=JASA, + volume={85}, + number={410}, + pages={398--409}, + year={1990}, + publisher={Taylor \& Francis Group} +} + +@Misc{genolini08, + title = {A (Not So) Short Introduction to \proglang{S}4}, + author = {Genolini, Christophe}, + howpublished = {\proglang{R} Foundation for Statistical Computing}, + year = {2008}, + url = {https://CRAN.R-project.org/doc/contrib/Genolini-S4tutorialV0-5en.pdf} +} + +@article{gillespie2001, + author = "Gillespie, Daniel T.", + title = "Approximate accelerated stochastic simulation of chemically reacting systems", + journal = "The Journal of Chemical Physics", + year = "2001", + volume = "115", + number = "4", + pages = "1716--1733", + doi = "http://dx.doi.org/10.1063/1.1378322" +} + +@ARTICLE{gillespie77, + author = {D. T. Gillespie}, + title = {Exact stochastic simulation of coupled chemical reactions}, + journal = {Journal of Physical Chemistry}, + year = {1977}, + volume = {81}, + pages = {2340--2361} +} + +@ARTICLE{grenfell02, + author = {B. T. Grenfell and O. N. Bj{\o}rnstad and B. F. 
Finkenstadt}, + title = {Dynamics of measles epidemics: Scaling noise, determinism, and predictability + with the {TSIR} model}, + journal = {Ecological Monographs}, + year = {2002}, + volume = {72}, + pages = {185--202} +} + +HHHHHHHHHHHH + +@article{halekoh06, + title={The {R} package geepack for generalized estimating equations}, + author={Halekoh, Ulrich and H{\o}jsgaard, S{\o}ren and Yan, Jun}, + journal={Journal of Statistical Software}, + doi={10.18637/jss.v015.i02}, + volume={15}, + number={2}, + pages={1--11}, + year={2006} +} + +@article{hastings70, + title={Monte {C}arlo sampling methods using {M}arkov chains and their applications}, + author={Hastings, W Keith}, + journal={Biometrika}, + volume={57}, + number={1}, + pages={97--109}, + year={1970}, + publisher={Biometrika Trust} +} + +@Article{he10, doi="10.1098/rsif.2009.0151",text={He, D., Ionides, E. L. and King, A. A. (2010). Plug-and-play inference for disease dynamics: Measles in large and small towns as a case study. J. Roy. Soc. Interface 7:271-283}, +Author = "He, D. and Ionides, E. L. and King, A. A.", +Year = 2010, +Title = "Plug-and-play inference for disease dynamics: + Measles in large and small towns as a case study", +Journal = JRSI, +xJournal = "Journal of the Royal Society Interface", +Volume = "7", +Pages = "271--283 " +} + +@article{heiss08, + title={Sequential numerical integration in nonlinear state space models for microeconometric panel data}, + author={Heiss, Florian}, + journal={Journal of Applied Econometrics}, + volume={23}, + number={3}, + pages={373--389}, + year={2008}, + publisher={Wiley Online Library} +} + +@book{hsiao14, + title={Analysis of Panel Data}, + author={Hsiao, Cheng}, + edition={3rd}, + year={2014}, + publisher=CUP, + address={Cambridge, UK} +} + +IIIIIIIIIIIIII + +@Article{ionides06,doi="10.1073/pnas.0603181103", +Author = "Ionides, E. L. and Bret\'{o}, C. and King, A. 
A.", +Year = 2006, +Title = "Inference for nonlinear dynamical systems", +Journal = PNAS, +xJournal = "Proceedings of the National Academy of Sciences of the USA", +Volume = "103", +Pages = "18438--18443" +} + +@Article{ionides11,doi="10.1214/11-AOS886", + Author = {Ionides, Edward L. and Bhadra, Anindya and Atchad\'{e}, Y. and King, Aaron A.}, + Title = {Iterated Filtering}, + Year = 2011, + Journal = "Annals of Statistics", + Volume = 39, + Pages = "1776--1802" +} + +@Article{ionides11-statSci, doi="10.1214/11-STS345C",text={Ionides, E. L. (2011). Discussion on "Feature Matching in Time Series Modeling" by Y. Xia and H. Tong. Statistical Science 26:49-52. http://dx.doi.org/10.1214/11-STS345C}, +Author = "E. L. Ionides", +Year = 2011, +Title = {Discussion on ``{F}eature Matching in Time Series Modeling'' by {Y}. {X}ia and {H}. {T}ong}, +Journal = "Statistical Science", +Volume = "26", +Pages = "49--52" +} + +@Article{ionides15,text={Ionides, EL, D Nguyen, Y Atchade, S Stoev and AA King. (2015). Inference for dynamic and latent variable models via iterated, perturbed Bayes maps. PNAS 112:719-724.}, +Author = {Ionides, Edward L. and Nguyen, Dao and Atchad\'{e}, Yves and Stoev, Stilian and King, Aaron A.}, +Title = {Inference for dynamic and latent variable models via iterated, perturbed {B}ayes maps}, +Year = {2015}, +doi = {10.1073/pnas.1410597112}, +Journal = PNAS, +xJournal = {Proceedings of the National Academy of Sciences of USA}, +Volume = 112, +Number = 3, +Pages = "719--724" +} + +@Article{ionides17-tas, + Author = {Ionides, E. L. and Giessing, A. and Ritov, Y. and Page, S. 
E.}, + Title = {Response to the {ASA}'s statement on p-values: Context, process, and purpose}, + Journal = {The American Statistician}, + Volume = {71}, + xNumber = {1}, + Pages = {88-89}, + Year = {2017}, + doi = {10.1080/00031305.2016.1234977}, +} + +@article{ionides17, + title={Monte {C}arlo profile confidence intervals for dynamic systems}, + author={Ionides, Edward L and Breto, Carles and Park, Joonha and Smith, Richard A and King, Aaron A}, + xjournal={Journal of the Royal Society Interface}, + journal=JRSI, + doi={10.1098/rsif.2017.0126}, + volume = {14}, + xnumber = {132}, + pages={1--10}, + year={2017} +} + +JJJJJJJJJJJJ + +@article{jacob15, + title={Sequential {B}ayesian inference for implicit hidden {M}arkov models and current limitations}, + author={Jacob, Pierre E}, + journal={ESAIM: Proceedings and Surveys}, + volume={51}, + pages={24--48}, + year={2015}, + publisher={EDP Sciences} +} + +@book{jacquez96, + title = {Compartmental Analysis in Biology and Medicine}, + publisher = {3rd edition, BioMedware, Ann Arbor, MI}, + year = {1996}, + author = {Jacquez, J. A.} +} + +KKKKKKKKKK + +@article{kantas15, + AUTHOR = {Kantas, Nikolas and Doucet, Arnaud and Singh, Sumeetpal S. and + Maciejowski, Jan and Chopin, Nicolas}, + TITLE = {On particle methods for parameter estimation in state-space + models}, + JOURNAL = {Statist. Sci.}, + FJOURNAL = {Statistical Science. A Review Journal of the Institute of + Mathematical Statistics}, + VOLUME = {30}, + YEAR = {2015}, + NUMBER = {3}, + PAGES = {328--351}, + ISSN = {0883-4237}, + DOI = {10.1214/14-STS511} +} + +@book{keeling08, + title={Modeling infectious diseases in humans and animals}, + author={Keeling, Matt J. and Rohani, Pejman}, + year={2008}, + publisher={Princeton University Press} +} + +@Article{king15, text={King AA, Domenech de Celle M, Magpantay FMG, Rohani P. 2015. Avoidable errors in the modelling of outbreaks of emerging pathogens, with special reference to Ebola. Proc. R. Soc. 
B 282: 20150347}, doi={10.1098/rspb.2015.0347}, + Title={Avoidable errors in the modelling of outbreaks of emerging pathogens, with special reference to {E}bola}, + Author={King, Aaron A. and Domenech de Celle, Matthieu and Magpantay, Felicia M. G. and Rohani, Pejman}, + Journal=PRSB, + xJournal={Proceedings of the Royal Society B}, + Volume={282}, + Pages={20150347}, + Year={2015} +} + +@Article{king16, text={King, A. A., Nguyen, D. and Ionides, E. L. (2015). Statistical Inference for Partially Observed Markov Processes via the R Package pomp. To appear in Journal of Statistical Software.}, +Author = {King, A. A. and Nguyen, D. and Ionides, E. L.}, +Year = {2016}, +Title = {Statistical Inference for Partially Observed {M}arkov Processes via the {R} Package pomp}, +Journal ={Journal of Statistical Software}, +Doi={10.18637/jss.v069.i12}, +Volume = {69}, +Pages = {1--43} +} + +@article{komorowski11, + title={Sensitivity, robustness, and identifiability in stochastic chemical kinetics models}, + author={Komorowski, Micha{\l} and Costa, Maria J and Rand, David A and Stumpf, Michael PH}, + journal={Proceedings of the National Academy of Sciences}, + volume={108}, + number={21}, + pages={8645--8650}, + year={2011}, + publisher={National Acad Sciences} +} + +LLLLLLLLLLLL + +@article{lee20, + title = {Achieving Coordinated National Immunity and Cholera Elimination in {H}aiti Through Vaccination: A Modelling Study}, + author = {Lee, Elizabeth C and Chao, Dennis L and Lemaitre, Joseph C and Matrajt, Laura and Pasetto, Damiano and Perez-Saez, Javier and Finger, Flavio and Rinaldo, Andrea and Sugimoto, Jonathan D and Halloran, M Elizabeth and Longini, Ira M and Ternier, Ralph and Vissieres, Kenia and Azman, Andrew S and Lessler, Justin and Ivers, Louise C}, + journal = {The Lancet Global Health}, + volume = {8}, + number = {8}, + pages = {e1081--e1089}, + year = {2020}, + publisher = {Elsevier}, + link = {https://doi.org/10.1016/S2214-109X(20)30310-7} +} + 
+@article{legland04, +author = "Le Gland, F. and Oudjane, N.", +doi = "10.1214/aoap/1075828050", +journal = "The Annals of Applied Probability", +month = "02", +number = "1", +pages = "144--187", +publisher = "The Institute of Mathematical Statistics", +title = "Stability and uniform approximation of nonlinear filters using the {H}ilbert metric and application to particle filters", +xurl = "https://doi.org/10.1214/aoap/1075828050", +volume = "14", +year = "2004" +} + +@article{losick08, + title={Stochasticity and Cell Fate}, + author={Losick, Richard and Desplan, Claude}, + journal={Science}, + volume={320}, + number={5872}, + pages={65--68}, + year={2008}, + publisher={American Association for the Advancement of Science} +} + +MMMMMMMMMM + +@article{marjoram03, doi={10.1073/pnas.0306899100}, + title={Markov chain {M}onte {C}arlo without likelihoods}, + author={Marjoram, Paul and Molitor, John and Plagnol, Vincent and Tavar{\'e}, Simon}, + journal={Proceedings of the National Academy of Sciences}, + volume={100}, + number={26}, + pages={15324--15328}, + year={2003}, + publisher={National Acad Sciences} +} +@article{marino19, doi={10.1002/ecy.2583}, + title={Evaluating consumptive and nonconsumptive predator effects on prey density using field time series data}, + author={Marino, J. A. and Peacor, S. D. and Bunnell, D. B. and Vanderploeg, H. A. and Pothoven, S. A. and Elgin, A. K. and Bence, J. R. and Jiao, J. and Ionides, E. L.}, + journal={Ecology}, + volume={100}, + pages={e02583}, + year={2019}, + publisher={Wiley Online Library} +} + +@Article{martinez-bakker15, Doi={10.1371/journal.pbio.1002172}, text={Martinez-Bakker, M., King, A. A. and Rohani, P. (2015). Unraveling the Transmission Ecology of Polio. PLoS Biology 13:e1002172}, +Author = {Martinez-Bakker, M. and King, A. A. 
and Rohani, P.}, +Year = {2015}, +Title = {Unraveling the Transmission Ecology of Polio}, +Journal = PLoS-Bio, +xJournal = {PLoS Biology}, +Volume = {13}, +Number={6}, +Pages = {e1002172} +} + +@Article{martinez-bakker14, + author = {Martinez-Bakker, Micaela and Bakker, Kevin M. and King, Aaron A. and Rohani, Pejman}, + title = {Human birth seasonality: latitudinal gradient and interplay with childhood disease dynamics}, + volume = {281}, + number = {1783}, + year = {2014}, + doi = {10.1098/rspb.2013.2438}, + publisher = {The Royal Society}, + issn = {0962-8452}, + journal = {Proceedings of the Royal Society of London B: Biological Sciences} +} + + + +@article{melnick53, + title={Development of neutralizing antibodies against the three types of poliomyelitis virus during an epidemic period. The ratio of inapparent infection to clinical poliomyelitis}, + author={Melnick, Joseph L and Ledinko, Nada}, + journal={American Journal of Hygiene}, + volume={58}, + number={2}, + pages={207--22}, + year={1953} +} + +@article{mesters14, + author={Mesters, Geert and Koopman, Siem Jan}, + year={2014}, + month={06}, + pages={127--140}, + title={Generalized dynamic panel data models with random effects for cross-section and time}, + volume={180}, + journal={Journal of Econometrics} +} + +@article{michaud21, + title={Sequential Monte Carlo Methods in the nimble and nimbleSMC R Packages}, + volume={100}, + url={https://www.jstatsoft.org/index.php/jss/article/view/v100i03}, + doi={10.18637/jss.v100.i03}, + number={3}, + journal={Journal of Statistical Software}, + author={Michaud, Nicholas and de Valpine, Perry and Turek, Daniel and Paciorek, Christopher J. and Nguyen, Dao}, + year={2021}, + pages={1–39} +} + +NNNNNNNNNNNN + +@article{ning21, +title = {Scalable {M}onte {C}arlo inference and rescaled local asymptotic normality}, +author = {Ning, N. and Ionides, E. L. 
and Ritov, Y.}, +year = {2021}, +journal = {Bernoulli}, +volume = {pre-published online} +} + +OOOOOOOOOO + +PPPPPPPPPPP + +@article{patel16, + title={A world free of polio --- The final steps}, + author={Patel, Manish and Orenstein, Walter}, + journal={New England Journal of Medicine}, + volume={374}, + number={6}, + pages={501--503}, + year={2016}, + publisher={Mass Medical Soc} +} + +@article{pons-salort18, + title={Trends in serotype-specific immunity explain the incidence patterns of diseases caused by human enteroviruses}, + author={Pons-Salort, M. and Grassly, N. C.}, + journal={To appear in Science}, + xvolume={}, + year={2018} +} + + +QQQQQQQQQQQ + +RRRRRRRRRRRR + +@Manual{R, + title = {\proglang{R}: {A} Language and Environment for Statistical Computing}, + author = {{\proglang{R} Core Team}}, + organization = {\proglang{R} Foundation for Statistical Computing}, + address = {Vienna, Austria}, + year = {2017}, + url = {https://www.R-project.org/}, +} + +@article{ramsay96, + xURL = {http://www.jstor.org/stable/2345889}, + author = {Ramsay, J. O.}, + xjournal = {Journal of the Royal Statistical Society. Series B (Methodological)}, + journal=JRSSB, + xnumber = {3}, + pages = {495-508}, + title = {Principal Differential Analysis: Data Reduction by Differential Operators}, + volume = {58}, + year = {1996} +} + +@book{ramsay97, + title={Functional Data Analysis}, + author={Ramsay, James O and Silverman, B. W.}, + year={1997}, + publisher={Springer-Verlag}, + address={New York} +} + + +@article {ranjeva17, + author = {Ranjeva, Sylvia L. and Baskerville, Edward B. and Dukic, Vanja and Villa, Luisa L. and Lazcano-Ponce, Eduardo and Giuliano, Anna R. 
and Dwyer, Greg and Cobey, Sarah}, + title = {Recurring infection with ecologically distinct {HPV} types can explain high prevalence and diversity}, + volume = {114}, + number = {51}, + pages = {13573--13578}, + year = {2017}, + doi = {10.1073/pnas.1714712114}, + publisher = {National Academy of Sciences}, + xjournal = {Proceedings of the National Academy of Sciences}, + journal=PNAS, +} + +@article{ranjeva19, doi={10.1038/s41467-019-09652-6}, + title={Age-specific differences in the dynamics of protective immunity to influenza}, + author={Ranjeva, Sylvia and Subramanian, Rahul and Fang, Vicky J and Leung, Gabriel M and Ip, Dennis KM and Perera, Ranawaka APM and Peiris, JS Malik and Cowling, Benjamin J and Cobey, Sarah}, + journal={Nature communications}, + volume={10}, + number={1}, + pages={1660}, + year={2019}, + publisher={Nature Publishing Group} +} + +@article{raser05,doi = {10.1126/science.1105891}, +author = {Raser, Jonathan M. and O'Shea, Erin K.}, +title = {Noise in Gene Expression: Origins, Consequences, and Control}, +journal = {Science}, +volume = {309}, +number = {5743}, +pages = {2010-2013}, +year = {2005} +} + +@Article{romero-severson12, + Title = {Heterogeneity in Number and Type of Sexual Contacts in a Gay Urban Cohort}, + Volume = {4}, + Number = {1}, + Journal = {Statistical Communications in Infectious Diseases}, + Author = {Romero-Severson, Ethan O. and Alam, Shah Jamal and Volz, Erik M. and Koopman, James S.}, + Year = {2012}, +} + +@Article{romero-severson15,doi={10.1093/aje/kwv044}, text={Romero-Severson, EO, Volz, E, Koopman, JS, Leitner, T and Ionides, EL (2015). Dynamic Variation in Sexual Contact Rates in a Cohort of HIV-Negative Gay Men. 
American Journal of Epidemiology 182:255-262.}, + Title={Dynamic Variation in Sexual Contact Rates in a Cohort of {HIV}-Negative Gay Men}, + Author={Romero-Severson, EO and Volz, E and Koopman, JS and Leitner, T and Ionides, EL}, + xJournal={American Journal of Epidemiology}, + Journal=AJE, + Pages={255-262}, + Volume={182}, + Year={2015}, + Publisher=OUP +} + +@Article{roy13, doi={10.1371/journal.pntd.0001979}, +Author = "Roy, M. and Bouma, M. J. and Ionides, E. L. and Dhiman, R. C. and Pascual, M.", +Year = 2013, +Title = "The potential elimination of {P}lasmodium vivax malaria by relapse treatment: Insights from a transmission model and surveillance data from {NW} {I}ndia +", +Journal = "PLoS Neglected Tropical Diseases", +Volume = 7, +Pages = "e1979" +} + +SSSSSSSSSSS + +@Article{shrestha11, doi = {10.1371/journal.pcbi.1002135}, text={Shrestha, S., A. A. King and P. Rohani (2011). "Statistical Inference for Multi-Pathogen Systems". PLoS Comput Biol 7:e1002135. http://dx.doi.org/10.1371/journal.pcbi.1002135}, + Author = {Shrestha, Sourya AND King, Aaron A. AND Rohani, Pejman}, + xJournal = {PLoS Comput Biol}, + Journal = PLoS-CompBio, + Publisher = {Public Library of Science}, + Title = {Statistical Inference for Multi-Pathogen Systems}, + Year = {2011}, + month = {08}, + Volume = {7}, + Pages = {e1002135}, + Number = {8}, +} + +@article{sisson07, doi={10.1073/pnas.0607208104}, + title={Sequential {M}onte {C}arlo without likelihoods}, + author={Sisson, Scott A and Fan, Yanan and Tanaka, Mark M}, + journal={Proceedings of the National Academy of Sciences}, + volume={104}, + number={6}, + pages={1760--1765}, + year={2007}, + publisher={National Acad Sciences} +} + +@Article{smith17, Doi={10.1093/molbev/msx124}, + Author = {Smith, R. A. and Ionides, E. L. and King, A. 
A.}, + Year = {2017}, + Title = {Infectious Disease Dynamics Inferred from Genetic Data via Sequential {M}onte {C}arlo}, + xJournal = MBE, + Journal = {Molecular Biology and Evolution}, + Volume = {34}, + Pages ={2065--2084}, + xNumber = {8} +} + +TTTTTTTTTTTTT + +UUUUUUUUUUUUU + +VVVVVVVVVVV + +@Article{vittinghoff99, +Author = {Vittinghoff, Eric and Douglas, John and Judson, Frank and McKirnan, David and MacQueen, Kate and Buchbinder, Susan P.}, +Title = {Per-Contact Risk of Human Immunodeficiency Virus Transmission between Male Sexual Partners}, +Volume = {150}, +Number = {3}, +Pages = {306--311}, +Year = {1999}, +Journal = {American Journal of Epidemiology}, +} + +WWWWWWWWW + +@article{wale19, +author = {Nina Wale and Matthew J. Jones and Derek G. Sim and Andrew F. Read and Aaron A. King }, +title = {The contribution of host cell-directed vs. parasite-directed immunity to the disease and dynamics of malaria infections}, +journal = {Proceedings of the National Academy of Sciences}, +volume = {116}, +number = {44}, +pages = {22386--22392}, +year = {2019}, +doi = {10.1073/pnas.1908147116}, +URL = {https://www.pnas.org/doi/abs/10.1073/pnas.1908147116}, +eprint = {https://www.pnas.org/doi/pdf/10.1073/pnas.1908147116} +} + + +@Article{whiteley13, + Author = {Whiteley, N.}, + Title = {Stability properties of some particle filters}, + xJournal={The Annals of Applied Probability}, + Journal=AOAP, + Volume={23}, + xNumber={6}, + Pages={2500--2537}, + Year={2013}, +} + +@article{wickham14, + title={Tidy data}, + author={Wickham, Hadley}, + journal={Journal of Statistical Software}, + volume={59}, + number={10}, + pages={1--23}, + year={2014}, + publisher={Foundation for Open Access Statistics} +} + +@article{wilkinson09, + title={Stochastic modelling for quantitative description of heterogeneous biological systems}, + author={Wilkinson, Darren J}, + journal={Nature Reviews Genetics}, + volume={10}, + number={2}, + pages={122--133}, + year={2009}, + publisher={Nature 
Publishing Group} +} + +@book{wilkinson12, +author = "Wilkinson, D. J.", +year = 2012, +title = "Stochastic Modelling for Systems Biology", +publisher = "Chapman \& Hall", +address = "Boca Raton, FL" +} + +@article{winsor32, doi={10.1073/pnas.18.1.1}, + title={The {G}ompertz curve as a growth curve}, + author={Winsor, Charles P}, + xjournal={Proceedings of the National Academy of Sciences}, + journal=PNAS, + volume={18}, + xnumber={1}, + pages={1--8}, + year={1932}, +} + +@Article{wood10, DOI={10.1038/nature09319}, +title={Statistical inference for noisy nonlinear ecological dynamic systems}, text={Wood (2010). Statistical inference for noisy nonlinear ecological dynamic systems. Nature 466:1102-1104.}, + Author={Wood, Simon N.}, + journal={Nature}, + Pages={1102--1104}, + Year={2010}, + Volume={466}, + xNumber={7310} +} + +XXXXXXXXX + +YYYYYYYYYYY + +@article{yang10,doi={10.1198/jasa.2010.ap09581}, +author = {Yang, Yang and Halloran, M. Elizabeth and Daniels, Michael J. and Longini, Ira M. and Burke, Donald S. and Cummings, Derek A. T.}, +title = {Modeling Competing Infectious Pathogens From a {B}ayesian Perspective: Application to Influenza Studies With Incomplete Laboratory Results}, +journal = {Journal of the American Statistical Association}, +volume = {105}, +number = {492}, +pages = {1310-1322}, +year = {2010} +} + +@article {yang12,doi = {10.1111/j.1541-0420.2012.01757.x}, +author = {Yang, Yang and Longini Jr., Ira M. and Halloran, M. 
Elizabeth and Obenchain, Valerie}, +title = {A Hybrid {EM} and {M}onte {C}arlo {EM} Algorithm and Its Application to Analysis of Transmission of Infectious Diseases}, +journal = {Biometrics}, +publisher = {Blackwell Publishing Inc}, +issn = {1541-0420}, +doi = {10.1111/j.1541-0420.2012.01757.x}, +volume={68}, +number={4}, +pages={1238--1249}, +year = {2012}, +} + +@article{yao05, + title={Functional data analysis for sparse longitudinal data}, + author={Yao, Fang and M{\"u}ller, Hans-Georg and Wang, Jane-Ling}, + xjournal={Journal of the American Statistical Association}, + journal=JASA, + volume={100}, + number={470}, + pages={577--590}, + year={2005}, + publisher={Taylor \& Francis} +} + +ZZZZZZZZZZ + + + diff --git a/vignettes/articles/package_tutorial/jasa3.bst b/vignettes/articles/package_tutorial/jasa3.bst new file mode 100644 index 0000000..c293c5f --- /dev/null +++ b/vignettes/articles/package_tutorial/jasa3.bst @@ -0,0 +1,1460 @@ +%% +%% This is file `jasa3.bst', +%% generated with the docstrip utility. +%% +%% The original source files were: +%% +%% merlin.mbs (with options: `,ay,cay,nm-rev,ed-rev,nmdash,dt-beg,yr-par,note-yr,tit-qq,atit-u,vnum-x,volp-com,pp-last,bkpg-x,add-pub,url,url-blk,edpar,bkedcap,blk-com,blknt,pp,xedn') +%% ---------------------------------------- +%% *** AMZ attempt at JASA style *** +%% +%% Copyright 1994-1999 Patrick W Daly + % =============================================================== + % IMPORTANT NOTICE: + % This bibliographic style (bst) file has been generated from one or + % more master bibliographic style (mbs) files, listed above. + % + % This generated file can be redistributed and/or modified under the terms + % of the LaTeX Project Public License Distributed from CTAN + % archives in directory macros/latex/base/lppl.txt; either + % version 1 of the License, or any later version. 
+ % =============================================================== + % Name and version information of the main mbs file: + % \ProvidesFile{merlin.mbs}[1999/05/28 3.89 (PWD)] + % For use with BibTeX version 0.99a or later + %------------------------------------------------------------------- + % This bibliography style file is intended for texts in ENGLISH + % This is an author-year citation style bibliography. As such, it is + % non-standard LaTeX, and requires a special package file to function properly. + % Such a package is natbib.sty by Patrick W. Daly + % or: chicago.sty + % The form of the bibitem entries is + % \bibitem[\protect\citeauthoryear{Jones, Baker, and Smith} + % {Jones et al.}{1990}{key}... + %--------------------------------------------------------------------- + +ENTRY + { address + author + booktitle + chapter + edition + editor + howpublished + institution + journal + key + month + note + number + organization + pages + publisher + school + series + title + type + url + volume + year + } + {} + { label extra.label sort.label short.list } + +INTEGERS { output.state before.all mid.sentence after.sentence after.block } + +FUNCTION {init.state.consts} +{ #0 'before.all := + #1 'mid.sentence := + #2 'after.sentence := + #3 'after.block := +} + +STRINGS { s t } + +FUNCTION {output.nonnull} +{ 's := + output.state mid.sentence = + { ", " * write$ } + { output.state after.block = + { add.period$ write$ + newline$ + "\newblock " write$ + } + { output.state before.all = + 'write$ + { add.period$ " " * write$ } + if$ + } + if$ + mid.sentence 'output.state := + } + if$ + s +} + +FUNCTION {output} +{ duplicate$ empty$ + 'pop$ + 'output.nonnull + if$ +} + +FUNCTION {output.check} +{ 't := + duplicate$ empty$ + { pop$ "empty " t * " in " * cite$ * warning$ } + 'output.nonnull + if$ +} + +FUNCTION {fin.entry} +{ add.period$ + write$ + newline$ +} + +FUNCTION {new.block} +{ output.state before.all = + 'skip$ + { after.block 'output.state := } + if$ +} + 
+FUNCTION {new.sentence} +{ output.state after.block = + 'skip$ + { output.state before.all = + 'skip$ + { after.sentence 'output.state := } + if$ + } + if$ +} + +FUNCTION {add.blank} +{ " " * before.all 'output.state := +} + +FUNCTION {date.block} +{ + skip$ +} + +FUNCTION {not} +{ { #0 } + { #1 } + if$ +} + +FUNCTION {and} +{ 'skip$ + { pop$ #0 } + if$ +} + +FUNCTION {or} +{ { pop$ #1 } + 'skip$ + if$ +} + +FUNCTION {non.stop} +{ duplicate$ + "}" * add.period$ + #-1 #1 substring$ "." = +} + +FUNCTION {new.block.checkb} +{ empty$ + swap$ empty$ + and + 'skip$ + 'new.block + if$ +} + +FUNCTION {field.or.null} +{ duplicate$ empty$ + { pop$ "" } + 'skip$ + if$ +} + +FUNCTION {emphasize} +{ duplicate$ empty$ + { pop$ "" } + { "{\em " swap$ * "\/}" * } + if$ +} + + +FUNCTION {capitalize} +{ "u" change.case$ "t" change.case$ } + +FUNCTION {space.word} +{ " " swap$ * " " * } + + % Here are the language-specific definitions for explicit words. + % Each function has a name bbl.xxx where xxx is the English word. + % The language selected here is ENGLISH +FUNCTION {bbl.and} +{ "and"} + +FUNCTION {bbl.etal} +{ "et~al." } + +FUNCTION {bbl.editors} +{ "editors" } + +FUNCTION {bbl.editor} +{ "editor" } + +FUNCTION {bbl.edby} +{ "edited by" } + +FUNCTION {bbl.edition} +{ "edition" } + +FUNCTION {bbl.volume} +{ "volume" } + +FUNCTION {bbl.of} +{ "of" } + +FUNCTION {bbl.number} +{ "number" } + +FUNCTION {bbl.nr} +{ "no." } + +FUNCTION {bbl.in} +{ "in" } + +FUNCTION {bbl.pages} +{ "pp." } + +FUNCTION {bbl.page} +{ "p." } + +FUNCTION {bbl.chapter} +{ "chapter" } + +FUNCTION {bbl.techrep} +{ "Technical Report" } + +FUNCTION {bbl.mthesis} +{ "Master's thesis" } + +FUNCTION {bbl.phdthesis} +{ "Ph.D. 
thesis" } + +MACRO {jan} {"January"} + +MACRO {feb} {"February"} + +MACRO {mar} {"March"} + +MACRO {apr} {"April"} + +MACRO {may} {"May"} + +MACRO {jun} {"June"} + +MACRO {jul} {"July"} + +MACRO {aug} {"August"} + +MACRO {sep} {"September"} + +MACRO {oct} {"October"} + +MACRO {nov} {"November"} + +MACRO {dec} {"December"} + +MACRO {acmcs} {"ACM Computing Surveys"} + +MACRO {acta} {"Acta Informatica"} + +MACRO {cacm} {"Communications of the ACM"} + +MACRO {ibmjrd} {"IBM Journal of Research and Development"} + +MACRO {ibmsj} {"IBM Systems Journal"} + +MACRO {ieeese} {"IEEE Transactions on Software Engineering"} + +MACRO {ieeetc} {"IEEE Transactions on Computers"} + +MACRO {ieeetcad} + {"IEEE Transactions on Computer-Aided Design of Integrated Circuits"} + +MACRO {ipl} {"Information Processing Letters"} + +MACRO {jacm} {"Journal of the ACM"} + +MACRO {jcss} {"Journal of Computer and System Sciences"} + +MACRO {scp} {"Science of Computer Programming"} + +MACRO {sicomp} {"SIAM Journal on Computing"} + +MACRO {tocs} {"ACM Transactions on Computer Systems"} + +MACRO {tods} {"ACM Transactions on Database Systems"} + +MACRO {tog} {"ACM Transactions on Graphics"} + +MACRO {toms} {"ACM Transactions on Mathematical Software"} + +MACRO {toois} {"ACM Transactions on Office Information Systems"} + +MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"} + +MACRO {tcs} {"Theoretical Computer Science"} + + +FUNCTION {format.url} +{ url empty$ + { "" } + { "\urlprefix\url{" url * "}" * } + if$ +} + +INTEGERS { nameptr namesleft numnames } + +FUNCTION {format.names} +{ 's := + "" 't := + #1 'nameptr := + s num.names$ 'numnames := + numnames 'namesleft := + { namesleft #0 > } + { s nameptr + "{vv~}{ll}{, jj}{, f.}" format.name$ + 't := + nameptr #1 > + { + namesleft #1 > + { ", " * t * } + { + numnames #2 > + { "," * } + 'skip$ + if$ + s nameptr "{ll}" format.name$ duplicate$ "others" = + { 't := } + { pop$ } + if$ + t "others" = + { + " " * bbl.etal * + } + { bbl.and 
+ space.word * t * + } + if$ + } + if$ + } + 't + if$ + nameptr #1 + 'nameptr := + namesleft #1 - 'namesleft := + } + while$ +} + +FUNCTION {format.names.ed} +{ format.names } + +FUNCTION {format.key} +{ empty$ + { key field.or.null } + { "" } + if$ +} + +FUNCTION {format.authors} +{ author empty$ + { "" } + { author format.names } + if$ +} + +FUNCTION {format.editors} +{ editor empty$ + { "" } + { editor format.names + " (" * + editor num.names$ #1 > + 'bbl.editors + 'bbl.editor + if$ + capitalize + * ")" * + } + if$ +} + +FUNCTION {format.in.editors} +{ editor empty$ + { "" } + { editor format.names.ed + editor num.names$ #1 > + { " (" * bbl.editors * ")" * } + { " (" * bbl.editor * ")" * } + if$ + } + if$ +} + +FUNCTION {format.note} +{ + note empty$ + { "" } + { note #1 #1 substring$ + duplicate$ "{" = + 'skip$ + { output.state mid.sentence = + { "l" } + { "u" } + if$ + change.case$ + } + if$ + note #2 global.max$ substring$ * + } + if$ +} + +FUNCTION {format.title} +{ title empty$ + { "" } + { title + "\enquote{" swap$ * + non.stop + { ",} " * } + { "} " * } + if$ + } + if$ +} + +FUNCTION {end.quote.title} +{ title empty$ + 'skip$ + { before.all 'output.state := } + if$ +} + +FUNCTION {format.full.names} +{'s := + "" 't := + #1 'nameptr := + s num.names$ 'numnames := + numnames 'namesleft := + { namesleft #0 > } + { s nameptr + "{vv~}{ll}" format.name$ + 't := + nameptr #1 > + { + namesleft #1 > + { ", " * t * } + { + s nameptr "{ll}" format.name$ duplicate$ "others" = + { 't := } + { pop$ } + if$ + t "others" = + { + " " * bbl.etal * + } + { + numnames #2 > + { "," * } + 'skip$ + if$ + bbl.and + space.word * t * + } + if$ + } + if$ + } + 't + if$ + nameptr #1 + 'nameptr := + namesleft #1 - 'namesleft := + } + while$ +} + +FUNCTION {author.editor.key.full} +{ author empty$ + { editor empty$ + { key empty$ + { cite$ #1 #3 substring$ } + 'key + if$ + } + { editor format.full.names } + if$ + } + { author format.full.names } + if$ +} + +FUNCTION {author.key.full} 
+{ author empty$ + { key empty$ + { cite$ #1 #3 substring$ } + 'key + if$ + } + { author format.full.names } + if$ +} + +FUNCTION {editor.key.full} +{ editor empty$ + { key empty$ + { cite$ #1 #3 substring$ } + 'key + if$ + } + { editor format.full.names } + if$ +} + +FUNCTION {make.full.names} +{ type$ "book" = + type$ "inbook" = + or + 'author.editor.key.full + { type$ "proceedings" = + 'editor.key.full + 'author.key.full + if$ + } + if$ +} + +FUNCTION {output.bibitem} +{ newline$ + "\bibitem[" write$ + "\protect\citeauthoryear{" make.full.names * "}{" * write$ + label write$ + "}{" year duplicate$ empty$ + { pop$ "????" } + 'skip$ + if$ + * extra.label * "}]{" * write$ + cite$ write$ + "}" write$ + newline$ + "" + before.all 'output.state := +} + +FUNCTION {n.dashify} +{ + 't := + "" + { t empty$ not } + { t #1 #1 substring$ "-" = + { t #1 #2 substring$ "--" = not + { "--" * + t #2 global.max$ substring$ 't := + } + { { t #1 #1 substring$ "-" = } + { "-" * + t #2 global.max$ substring$ 't := + } + while$ + } + if$ + } + { t #1 #1 substring$ * + t #2 global.max$ substring$ 't := + } + if$ + } + while$ +} + +FUNCTION {word.in} +{ bbl.in + " " * } + +FUNCTION {format.date} +{ year duplicate$ empty$ + { "empty year in " cite$ * "; set to ????" * warning$ + pop$ "????" 
} + 'skip$ + if$ + extra.label * + before.all 'output.state := + " (" swap$ * ")" * +} + +FUNCTION {format.btitle} +{ title emphasize +} + +FUNCTION {tie.or.space.connect} +{ duplicate$ text.length$ #3 < + { "~" } + { " " } + if$ + swap$ * * +} + +FUNCTION {either.or.check} +{ empty$ + 'pop$ + { "can't use both " swap$ * " fields in " * cite$ * warning$ } + if$ +} + +FUNCTION {format.bvolume} +{ volume empty$ + { "" } + { bbl.volume volume tie.or.space.connect + series empty$ + 'skip$ + { bbl.of space.word * series emphasize * } + if$ + "volume and number" number either.or.check + } + if$ +} + +FUNCTION {format.number.series} +{ volume empty$ + { number empty$ + { series field.or.null } + { output.state mid.sentence = + { bbl.number } + { bbl.number capitalize } + if$ + number tie.or.space.connect + series empty$ + { "there's a number but no series in " cite$ * warning$ } + { bbl.in space.word * series * } + if$ + } + if$ + } + { "" } + if$ +} + + +FUNCTION {format.edition} +{ edition empty$ + { "" } + { output.state mid.sentence = + { edition "l" change.case$ " " * bbl.edition * } + { edition "t" change.case$ " " * bbl.edition * } + if$ + } + if$ +} + +INTEGERS { multiresult } + +FUNCTION {multi.page.check} +{ 't := + #0 'multiresult := + { multiresult not + t empty$ not + and + } + { t #1 #1 substring$ + duplicate$ "-" = + swap$ duplicate$ "," = + swap$ "+" = + or or + { #1 'multiresult := } + { t #2 global.max$ substring$ 't := } + if$ + } + while$ + multiresult +} + +FUNCTION {format.pages} +{ pages empty$ + { "" } + { pages multi.page.check + { pages n.dashify } + { pages } + if$ + } + if$ +} + +FUNCTION {format.journal.pages} +{ pages empty$ + 'skip$ + { duplicate$ empty$ + { pop$ format.pages } + { + ", " * + pages n.dashify * + } + if$ + } + if$ +} + +FUNCTION {format.vol.num.pages} +{ volume field.or.null +} + +FUNCTION {format.chapter.pages} +{ chapter empty$ + { "" } + { type empty$ + { bbl.chapter } + { type "l" change.case$ } + if$ + chapter 
tie.or.space.connect + } + if$ +} + +FUNCTION {format.in.ed.booktitle} +{ booktitle empty$ + { "" } + { editor empty$ + { word.in booktitle emphasize * } + { word.in format.in.editors * ", " * + booktitle emphasize * } + if$ + } + if$ +} + +FUNCTION {format.thesis.type} +{ type empty$ + 'skip$ + { pop$ + type "t" change.case$ + } + if$ +} + +FUNCTION {format.tr.number} +{ type empty$ + { bbl.techrep } + 'type + if$ + number empty$ + { "t" change.case$ } + { number tie.or.space.connect } + if$ +} + +FUNCTION {format.article.crossref} +{ + word.in + " \cite{" * crossref * "}" * +} + +FUNCTION {format.book.crossref} +{ volume empty$ + { "empty volume in " cite$ * "'s crossref of " * crossref * warning$ + word.in + } + { bbl.volume volume tie.or.space.connect + bbl.of space.word * + } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {format.incoll.inproc.crossref} +{ + word.in + " \cite{" * crossref * "}" * +} + +FUNCTION {format.org.or.pub} +{ 't := + "" + address empty$ t empty$ and + 'skip$ + { + address empty$ + 'skip$ + { address * } + if$ + t empty$ + 'skip$ + { address empty$ + 'skip$ + { ": " * } + if$ + t * + } + if$ + } + if$ +} + +FUNCTION {format.publisher.address} +{ publisher empty$ + { "empty publisher in " cite$ * warning$ + "" + } + { publisher } + if$ + format.org.or.pub +} + +FUNCTION {format.organization.address} +{ organization empty$ + { "" } + { organization } + if$ + format.org.or.pub +} + +STRINGS {oldname} + +FUNCTION {name.or.dash} +{ 's := + oldname empty$ + { s 'oldname := s } + { s oldname = + { "---" } + { s 'oldname := s } + if$ + } + if$ +} + +FUNCTION {article} +{ output.bibitem + format.authors "author" output.check + author format.key output + name.or.dash + format.date "year" output.check + date.block + format.title "title" output.check + end.quote.title + crossref missing$ + { journal + emphasize + "journal" output.check + format.vol.num.pages output + } + { format.article.crossref output.nonnull + format.pages output + } + if$ 
+ format.journal.pages + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {book} +{ output.bibitem + author empty$ + { format.editors "author and editor" output.check + editor format.key output + name.or.dash + } + { format.authors output.nonnull + name.or.dash + crossref missing$ + { "author and editor" editor either.or.check } + 'skip$ + if$ + } + if$ + format.date "year" output.check + date.block + format.btitle "title" output.check + crossref missing$ + { format.bvolume output + format.number.series output + format.publisher.address output + } + { + format.book.crossref output.nonnull + } + if$ + format.edition output + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {booklet} +{ output.bibitem + format.authors output + author format.key output + name.or.dash + format.date "year" output.check + date.block + format.title "title" output.check + end.quote.title + howpublished output + address output + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {inbook} +{ output.bibitem + author empty$ + { format.editors "author and editor" output.check + editor format.key output + name.or.dash + } + { format.authors output.nonnull + name.or.dash + crossref missing$ + { "author and editor" editor either.or.check } + 'skip$ + if$ + } + if$ + format.date "year" output.check + date.block + format.btitle "title" output.check + crossref missing$ + { + format.bvolume output + format.chapter.pages "chapter and pages" output.check + format.number.series output + format.publisher.address output + } + { + format.chapter.pages "chapter and pages" output.check + format.book.crossref output.nonnull + } + if$ + format.edition output + format.pages "pages" output.check + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {incollection} +{ output.bibitem + format.authors "author" output.check + author format.key output + name.or.dash + format.date "year" 
output.check + date.block + format.title "title" output.check + end.quote.title + crossref missing$ + { format.in.ed.booktitle "booktitle" output.check + format.bvolume output + format.number.series output + format.chapter.pages output + format.publisher.address output + format.edition output + } + { format.incoll.inproc.crossref output.nonnull + format.chapter.pages output + } + if$ + format.pages "pages" output.check + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {inproceedings} +{ output.bibitem + format.authors "author" output.check + author format.key output + name.or.dash + format.date "year" output.check + date.block + format.title "title" output.check + end.quote.title + crossref missing$ + { format.in.ed.booktitle "booktitle" output.check + format.bvolume output + format.number.series output + publisher empty$ + { format.organization.address output } + { organization output + format.publisher.address output + } + if$ + } + { format.incoll.inproc.crossref output.nonnull + format.pages output + } + if$ + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {conference} { inproceedings } + +FUNCTION {manual} +{ output.bibitem + format.authors output + author format.key output + name.or.dash + format.date "year" output.check + date.block + format.btitle "title" output.check + organization output + address output + format.edition output + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {mastersthesis} +{ output.bibitem + format.authors "author" output.check + author format.key output + name.or.dash + format.date "year" output.check + date.block + format.btitle "title" output.check + bbl.mthesis format.thesis.type output.nonnull + school "school" output.check + address output + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {misc} +{ output.bibitem + format.authors output + author format.key output + name.or.dash + format.date "year" 
output.check + date.block + format.title output + end.quote.title + howpublished output + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {phdthesis} +{ output.bibitem + format.authors "author" output.check + author format.key output + name.or.dash + format.date "year" output.check + date.block + format.btitle "title" output.check + bbl.phdthesis format.thesis.type output.nonnull + school "school" output.check + address output + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {proceedings} +{ output.bibitem + format.editors output + editor format.key output + name.or.dash + format.date "year" output.check + date.block + format.btitle "title" output.check + format.bvolume output + format.number.series output + publisher empty$ + { format.organization.address output } + { organization output + format.publisher.address output + } + if$ + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {techreport} +{ output.bibitem + format.authors "author" output.check + author format.key output + name.or.dash + format.date "year" output.check + date.block + format.title "title" output.check + end.quote.title + format.tr.number output.nonnull + institution "institution" output.check + address output + format.url output + new.sentence + format.note output + fin.entry +} + +FUNCTION {unpublished} +{ output.bibitem + format.authors "author" output.check + author format.key output + name.or.dash + format.date "year" output.check + date.block + format.title "title" output.check + end.quote.title + format.url output + new.sentence + format.note "note" output.check + fin.entry +} + +FUNCTION {default.type} { misc } + +READ + +FUNCTION {sortify} +{ purify$ + "l" change.case$ +} + +INTEGERS { len } + +FUNCTION {chop.word} +{ 's := + 'len := + s #1 len substring$ = + { s len #1 + global.max$ substring$ } + 's + if$ +} + +FUNCTION {format.lab.names} +{ 's := + "" 't := + s #1 "{vv~}{ll}" 
format.name$ + s num.names$ duplicate$ + #2 > + { pop$ + " " * bbl.etal * + } + { #2 < + 'skip$ + { s #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" = + { + " " * bbl.etal * + } + { bbl.and space.word * s #2 "{vv~}{ll}" format.name$ + * } + if$ + } + if$ + } + if$ +} + +FUNCTION {author.key.label} +{ author empty$ + { key empty$ + { cite$ #1 #3 substring$ } + 'key + if$ + } + { author format.lab.names } + if$ +} + +FUNCTION {author.editor.key.label} +{ author empty$ + { editor empty$ + { key empty$ + { cite$ #1 #3 substring$ } + 'key + if$ + } + { editor format.lab.names } + if$ + } + { author format.lab.names } + if$ +} + +FUNCTION {editor.key.label} +{ editor empty$ + { key empty$ + { cite$ #1 #3 substring$ } + 'key + if$ + } + { editor format.lab.names } + if$ +} + +FUNCTION {calc.short.authors} +{ type$ "book" = + type$ "inbook" = + or + 'author.editor.key.label + { type$ "proceedings" = + 'editor.key.label + 'author.key.label + if$ + } + if$ + 'short.list := +} + +FUNCTION {calc.label} +{ calc.short.authors + short.list + ", " + * + year duplicate$ empty$ + { pop$ "????" 
} + 'skip$ + if$ + * + 'label := +} + +FUNCTION {calc.short.label} +{ calc.short.authors short.list + 'label := +} + +FUNCTION {sort.format.names} +{ 's := + #1 'nameptr := + "" + s num.names$ 'numnames := + numnames 'namesleft := + { namesleft #0 > } + { s nameptr + "{vv{ } }{ll{ }}{ f{ }}{ jj{ }}" + format.name$ 't := + nameptr #1 > + { + " " * + namesleft #1 = t "others" = and + { "zzzzz" * } + { t sortify * } + if$ + } + { t sortify * } + if$ + nameptr #1 + 'nameptr := + namesleft #1 - 'namesleft := + } + while$ +} + +FUNCTION {sort.format.title} +{ 't := + "A " #2 + "An " #3 + "The " #4 t chop.word + chop.word + chop.word + sortify + #1 global.max$ substring$ +} + +FUNCTION {author.sort} +{ author empty$ + { key empty$ + { "to sort, need author or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { author sort.format.names } + if$ +} + +FUNCTION {author.editor.sort} +{ author empty$ + { editor empty$ + { key empty$ + { "to sort, need author, editor, or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { editor sort.format.names } + if$ + } + { author sort.format.names } + if$ +} + +FUNCTION {editor.sort} +{ editor empty$ + { key empty$ + { "to sort, need editor or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { editor sort.format.names } + if$ +} + +FUNCTION {presort} +{ calc.label + label sortify + " " + * + type$ "book" = + type$ "inbook" = + or + 'author.editor.sort + { type$ "proceedings" = + 'editor.sort + 'author.sort + if$ + } + if$ + #1 entry.max$ substring$ + 'sort.label := + sort.label + * + " " + * + title field.or.null + sort.format.title + * + #1 entry.max$ substring$ + 'sort.key$ := +} + +ITERATE {presort} + +SORT + +STRINGS { last.label next.extra } + +INTEGERS { last.extra.num number.label } + +FUNCTION {initialize.extra.label.stuff} +{ #0 int.to.chr$ 'last.label := + "" 'next.extra := + #0 'last.extra.num := + #0 'number.label := +} + +FUNCTION {forward.pass} +{ last.label label = + { 
last.extra.num #1 + 'last.extra.num := + last.extra.num int.to.chr$ 'extra.label := + } + { "a" chr.to.int$ 'last.extra.num := + "" 'extra.label := + label 'last.label := + } + if$ + number.label #1 + 'number.label := +} + +FUNCTION {reverse.pass} +{ next.extra "b" = + { "a" 'extra.label := } + 'skip$ + if$ + extra.label 'next.extra := + extra.label + duplicate$ empty$ + 'skip$ + { "{\natexlab{" swap$ * "}}" * } + if$ + 'extra.label := +} + +EXECUTE {initialize.extra.label.stuff} + +ITERATE {forward.pass} + +REVERSE {reverse.pass} + +FUNCTION {bib.sort.order} +{ sort.label + " " + * + year field.or.null sortify + * + " " + * + title field.or.null + sort.format.title + * + #1 entry.max$ substring$ + 'sort.key$ := + calc.short.label +} + +ITERATE {bib.sort.order} + +SORT + +FUNCTION {begin.bib} +{ preamble$ empty$ + 'skip$ + { preamble$ write$ newline$ } + if$ + "\begin{thebibliography}{" number.label int.to.str$ * "}" * + write$ newline$ + "\newcommand{\enquote}[1]{``#1''}" + write$ newline$ + "\expandafter\ifx\csname natexlab\endcsname\relax\def\natexlab#1{#1}\fi" + write$ newline$ + "\expandafter\ifx\csname url\endcsname\relax" + write$ newline$ + " \def\url#1{{\tt #1}}\fi" + write$ newline$ + "\expandafter\ifx\csname urlprefix\endcsname\relax\def\urlprefix{URL }\fi" + write$ newline$ +} + +EXECUTE {begin.bib} + +EXECUTE {init.state.consts} + +ITERATE {call.type$} + +FUNCTION {end.bib} +{ newline$ + "\end{thebibliography}" write$ newline$ +} + +EXECUTE {end.bib} +%% End of customized bst file +%% +%% End of file `jasa3.bst'. 
diff --git a/vignettes/articles/package_tutorial/rda3/block.rda b/vignettes/articles/package_tutorial/rda3/block.rda new file mode 100644 index 0000000..6f3eae7 Binary files /dev/null and b/vignettes/articles/package_tutorial/rda3/block.rda differ diff --git a/vignettes/articles/package_tutorial/rda3/mif.rda b/vignettes/articles/package_tutorial/rda3/mif.rda new file mode 100644 index 0000000..0280db1 Binary files /dev/null and b/vignettes/articles/package_tutorial/rda3/mif.rda differ diff --git a/vignettes/articles/package_tutorial/rda3/pf.rda b/vignettes/articles/package_tutorial/rda3/pf.rda new file mode 100644 index 0000000..fff1685 Binary files /dev/null and b/vignettes/articles/package_tutorial/rda3/pf.rda differ diff --git a/vignettes/articles/package_tutorial/rda3/profile.rda b/vignettes/articles/package_tutorial/rda3/profile.rda new file mode 100644 index 0000000..54ca273 Binary files /dev/null and b/vignettes/articles/package_tutorial/rda3/profile.rda differ diff --git a/vignettes/articles/package_tutorial/rda3/profile_kalman.rda b/vignettes/articles/package_tutorial/rda3/profile_kalman.rda new file mode 100644 index 0000000..df50b75 Binary files /dev/null and b/vignettes/articles/package_tutorial/rda3/profile_kalman.rda differ diff --git a/vignettes/articles/package_tutorial/rda3/profile_table.rda b/vignettes/articles/package_tutorial/rda3/profile_table.rda new file mode 100644 index 0000000..118d8c6 Binary files /dev/null and b/vignettes/articles/package_tutorial/rda3/profile_table.rda differ diff --git a/vignettes/articles/package_tutorial/tutorial.Rnw b/vignettes/articles/package_tutorial/tutorial.Rnw new file mode 100644 index 0000000..56662b0 --- /dev/null +++ b/vignettes/articles/package_tutorial/tutorial.Rnw @@ -0,0 +1,923 @@ +\documentclass[12pt]{article} +\usepackage{amsmath} +\usepackage{graphicx} +%\usepackage{enumerate} +\usepackage{url} % not crucial - just used below for the URL +\usepackage{amsfonts} +\usepackage{setspace} 
+\usepackage{caption} +\usepackage{subcaption} +\usepackage[ruled,noline]{algorithm2e} +\usepackage[table]{xcolor} +\usepackage{mathtools} +\usepackage{authblk} +\usepackage{xspace} +\usepackage{natbib} +\DeclarePairedDelimiter\ceil{\lceil}{\rceil} +\DeclarePairedDelimiter\floor{\lfloor}{\rfloor} +\DeclareMathOperator*{\argmax}{arg\,max} + +%\pdfminorversion=4 +% NOTE: To produce blinded version, replace "0" with "1" below. +\newcommand{\blind}{0} + +\newcommand{\proglang}[1]{\textsf{#1}} +\newcommand\figTitle{\bf} +\newcommand\pkg{\texttt} +\newcommand\code{\texttt} +\newcommand\eic[1]{\textcolor{orange}{[EI: #1]}} +\newcommand\jwc[1]{\textcolor{brown}{[JW: #1]}} +\newcommand\TODO[1]{\textcolor{red}{[TODO: #1]}} +\newcommand\allVar{\psi} +\newcommand\allVarExt{\bar{\psi}} +\newcommand\improvedCell{\cellcolor{blue!25}} +\newcommand\R{\texttt{R}\xspace} +\newcommand\panelPomp{\texttt{panelPomp}\xspace} +\newcommand\pomp{\texttt{pomp}\xspace} +\newcommand\nimble{\texttt{nimble}\xspace} +\newcommand\mcstate{\texttt{mcstate}\xspace} +\newcommand\spatPomp{\texttt{spatPomp}\xspace} +\newcommand\PanelPOMP{PanelPOMP} +\newcommand\example[1]{ \item #1} + +% PSUEDO-CODE HELPERS +\newcommand\mycolon{{\hspace{0.6mm}:\hspace{0.6mm}}} +\newcommand\np{j} +\newcommand\Np{J} % Number of particles +\newcommand\nrep{i} % Replicate index (in likelihood evaluations) +\newcommand\Nrep{I} % Number of replicates (in likelihood evaluations) +\newcommand\unit{u} % Unit index +\newcommand\Unit{U} % Number of panel units +\renewcommand\time{n} +\newcommand\Time{N} +\newcommand\Nmif{M} % Number of iterated filtering steps +\newcommand\nmif{m} % Mif index +\newcommand\Shared{\Phi} +\newcommand\Specific{\Psi} +\newcommand\shared{\phi} +\newcommand\specific{\psi} +\newcommand\Nshared{A} +\newcommand\nshared{a} +\newcommand\Nspecific{B} +\newcommand\nspecific{b} +\newcommand\dimSpecific{{\mathrm{dim}(\Psi)}} +\newcommand\dimShared{{\mathrm{dim}(\Phi)}} +\newcommand\argequals{{\,=\,}} 
+\newcommand\mystretch{\rule[-2mm]{0mm}{5mm} } +\newcommand\myBigStretch{\rule[-3mm]{0mm}{5mm} } +\newcommand\asp{\hspace{6mm}} + +% MATH HELPERS +\newcommand\RealSpace{\mathbb{R}} +\newcommand\loglik{\lambda} +\newcommand\lik{\ell} +\newcommand\Xspace{{\mathbb X}} +\newcommand\Yspace{{\mathbb Y}} +\newcommand\Rspace{{\mathbb R}} +\newcommand\hatTheta{\widehat{\Theta}} +\newcommand\Xdim{{\mathrm{dim}}(\Xspace)} +\newcommand\Ydim{{\mathrm{dim}}(\Yspace)} +%\newcommand\Thetadim{{{\mathrm{dim}}(\Theta)}} +\newcommand\Thetadim{D} +\newcommand\contactsRate{\Lambda} +\newcommand\given{{\, | \,}} +\newcommand\giventh{{\,;\,}} +\newcommand\normal{\mathrm{Normal}} +\newcommand\seq[2]{{#1}\!:\!{#2}} +\newcommand\prob{\mathbb{P}} + +% MARGINS +\addtolength{\oddsidemargin}{-.5in}% +\addtolength{\evensidemargin}{-1in}% +\addtolength{\textwidth}{1in}% +\addtolength{\textheight}{1.7in}% +\addtolength{\topmargin}{-1in}% + +\begin{document} + +%\bibliographystyle{natbib} + +\def\spacingset#1{\renewcommand{\baselinestretch}% +{#1}\small\normalsize} \spacingset{1} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% \if0\blind +% { +% \title{\bf Likelihood Based Inference for ARMA Models} +% \author{ +% Carles Bret\'o, Universitat de Val\`encia\\ +% Jesse Wheeler, University of Michigan\\ +% Edward L. Ionides, University of Michigan\\ +% Aaron A. King, University of Michigan} +% \maketitle +% } \fi + +\if0\blind +{ + \title{\bf A tutorial on panel data analysis using partially observed Markov processes via the \R package \panelPomp} + + \author[1]{Carles Bret\'o} + \author[2]{Jesse Wheeler} + \author[2]{Edward L. Ionides} + \author[2]{Aaron A. 
King} + + \affil[1]{Universitat de Val\`encia} + \affil[2]{University of Michigan} + + \maketitle +} \fi + +\if1\blind +{ + \bigskip + \bigskip + \bigskip + \begin{center} + {\Large\bf A tutorial on panel data analysis using partially observed Markov processes via the \R package \panelPomp} +\end{center} + \medskip +} \fi + +\bigskip +\begin{abstract} +\noindent The \R package \panelPomp supports analysis of panel data via a general class of partially observed Markov process models (PanelPOMP). +We present a tutorial for this package, describing how the mathematical concept of a PanelPOMP is represented in the software, as well as a demonstration of typical use-cases of \panelPomp. +Monte Carlo methods used for POMP models require adaptation for PanelPOMP models due to the higher dimensionality of panel data. +The package takes advantage of recent advances for PanelPOMP, including an iterated filtering algorithm, Monte Carlo adjusted profile methodology and block optimization methodology to assist with the large parameter spaces that can arise with panel models. +In addition, tools for manipulation of models and data are provided that take advantage of the panel structure. +\end{abstract} + +\vfill + +<>= + +run_level <- 3 +VERBOSE <- FALSE + +rda_dir <- paste0("rda",run_level,"/") +if(!dir.exists(rda_dir)) dir.create(rda_dir) + +library(knitr) +library(panelPomp) +library(ggplot2) +library(foreach) +library(doParallel) +library(doRNG) + +cores <- detectCores()/2 +registerDoParallel(cores) +registerDoRNG(25880) + +knitr::opts_chunk$set( + echo = FALSE, + warning = FALSE, + message = FALSE, + fig.align = 'center', + dev = 'cairo_pdf' +) + +# Setting black and white ggplot2 theme for entire document. +theme_set( + theme_bw() + + theme( + plot.title = element_text(hjust = 0.5), + text = element_text(family = 'Times') # Try other options, including Helvetica, which is the default. 
+ ) +) + +myRound <- function (x, digits = 1) +{ + # taken from the broman package + if (digits < 1) + stop("This is intended for the case digits >= 1.") + if (length(digits) > 1) { + digits <- digits[1] + warning("Using only digits[1]") + } + tmp <- sprintf(paste("%.", digits, "f", sep = ""), x) + zero <- paste0("0.", paste(rep("0", digits), collapse = "")) + tmp[tmp == paste0("-", zero)] <- zero + tmp +} +@ + + +\newpage +\spacingset{1.25} + +\AtBeginEnvironment{algorithm}{% + \singlespacing + % \renewcommand{\arraystretch}{1}% +} + +\section{Introduction} +\label{sec:intro} + +This tutorial describes a typical use-case of the \panelPomp \R package. +Partially observed Markov process (POMP) models---also known as state-space or hidden Markov models---are useful mathematical tools for modeling non-linear dynamic systems. +POMP models describe a system via an unobserved dynamic model that has the Markov property, coupled with a model for how observations are drawn from the latent process. +Various software packages provide platforms for performing statistical analysis of these systems using POMP models (for instance, \pomp \citep{king16}, \spatPomp \citep{asfaw21}, \nimble \citep{michaud21}, and \mcstate \citep{fitzjohn20}). +However, particular challenges arise when modeling \emph{panel data} via POMP models; these data arise when time series are measured on a collection of independent units. +While each unit may be modeled separately, analyzing the data as a single collection can provide insights into the underlying dynamical system that may not be obtained otherwise. +For instance, each time series in the panel may be too short to infer a complex dynamical model, so that inference on an underlying model must combine information across the units. +\panelPomp is, to our knowledge, the first software package specifically addressing these issues. 
+The utility of \panelPomp has been demonstrated in several scientific applications \citep{ranjeva17,ranjeva19,wale19,domeyer22,lee20}. + +The current version of \panelPomp emphasizes plug-and-play methods \citep{breto09,he10}, also known as likelihood-free \citep{marjoram03,sisson07}, that are applicable to dynamic models for which a simulator is available even when the transition densities are unavailable. +This class of flexible algorithms allows researchers to build their models based on scientific reasoning rather than statistical convenience, typically at the expense of computational efficiency. +In the following sections, this tutorial demonstrates how the \panelPomp package can be used to model nonlinear dynamic systems using plug-and-play methodologies. + +\section{PanelPOMP models} \label{sec:PanelPOMP} + +Panel data is a collection of multiple time series datasets, each possibly multivariate on its own, where each time series is associated with a unit; the units can represent spatial locations, agents in a system, or other units for which data is collected over time. +For convenience of identifying units in the panel, we use numeric labels $\{1,2,\dots,\Unit\}$, which we also write as $1\mycolon\Unit$. +Time series from each unit may be of different lengths, and so we define $N_\unit$ as the number of measurements collected on unit $\unit$. +The observations are modeled as a realization of a stochastic process $Y_{\unit,1:N_\unit}$, observed at times $t_{\unit,1}>= +gomp <- panelGompertz(N = 100, U = 50) +@ +\noindent Here, the number of units is \code{length(gomp)=\Sexpr{length(gomp)}}. +\panelPomp uses S4 classes \citep{chambers98,genolini08} with \code{gomp} having the base class \code{panelPomp}. 
+Because data are a key component of PanelPOMP models, the \code{panelGompertz} function first creates a PanelPOMP model, and then generates a dataset by simulating from that model using a reproducible seed specified by the \code{seed} argument to the function. + +<>= +plot(gomp[1:2]) +@ + +\begin{figure} +\begin{center} +\resizebox{3in}{!}{\includegraphics{figure/plot-gomp-1}} +\resizebox{3in}{!}{\includegraphics{figure/plot-gomp-2}} +\caption{Separate plots produced by \code{plot(gomp[1:2])}.} +\label{fig-plot-gomp} +\end{center} +\end{figure} + +Commonly, the first thing to do with a new object is to plot it, and Fig.~\ref{fig-plot-gomp} demonstrates the panelPomp \code{plot} method applied after subsetting \code{gomp}. +The units of a panelPomp do not necessarily share the same variables, so in general a sequence of separate plots is all that can be offered. +It may happen that the units can meaningfully be plotted on the same axis, and that can be achieved by coercing the \code{panelPomp} object to a \code{pompList} and using the \code{plot} method for that class (Fig.~\ref{fig:plot-gomp-pompList}). +A third option is to export the \code{panelPomp} object via \code{as(gomp,"data.frame")} and work with this to produce customized plots. + +<>= +par(mai=c(0.8,0.8,0.1,0.1)) +plot(as(gomp,"pompList")) +@ + +A basic operation is simulation. +We can generate another simulation with the same parameter values: +<>= +gomp2 <- simulate(gomp) +@ +\noindent We can simulate from the same model at new parameter values by giving additional arguments to \code{simulate}. +There are two different representations of parameters within \panelPomp which are convenient in different situations. +The unit-specific parameters are naturally represented as a matrix, set with the \code{specific} argument in the \code{panelPomp} constructor function, with a column for each unit and a row for each parameter. 
+Similarly, the shared parameters are a named vector that can be set using the \code{shared} argument. +Alternatively, we can consider the parameters as a single named vector, with a naming convention that \code{"beta[unit7]"} is the name of the unit-specific parameter \code{beta} for unit $u=7$. +Model parameters can be extracted and set in this vector-format using the functions \code{coef()} and \code{coef()<-}, respectively. +Alternatively, unit-specific and shared parameters can be extracted using functions \code{specific()} and \code{shared()}, and modified using the equivalent setter functions \code{specific()<-} and \code{shared()<-}. +For example, all shared and a subset of unit-specific parameters (from units 1--3) can be extracted in vector format via +<>= +coef(gomp[1:3]) +@ +\noindent or in a list format using the \code{format = `list'} argument +<>= +coef(gomp[1:3], format = 'list') +@ +\noindent The functions \code{toParamList} and \code{toParamVec} facilitate movement between the vector and list formats +<>= +toParamList(coef(gomp[1:3])) +@ + +\panelPomp seeks to avoid unnecessary duplication with \pomp. +Thus, \panelPomp requires that \pomp is loaded and builds on existing functionality of \pomp where possible. +In particular, a list of \code{pomp} objects for each unit can be extracted from a \code{panelPomp} object via +<<,echo=T,eval=F>>= +as(panelPompObject,"list") +@ +Some methods in the \pkg{pomp} package take advantage of a \code{pompList} class which is defined as a list of \code{pomp} objects. +These methods can be accessed via +<<,echo=T,eval=F>>= +as(panelPompObject,"pompList") +@ +Many critical issues in computational performance of the basic model components for \panelPomp, and utilities for assisting with the user specification of the model, have already received extensive development and testing in the context of \pomp. 
+All the facilities for constructing POMP models in \pomp are available for constructing the models for each unit in \panelPomp. +This article avoids duplication by referring the reader to \citet{king16} and the \pomp documentation for detailed discussion of constructing \code{pomp} objects. +In Section~\ref{sec:examples}, we identify some case studies that provide useful code for scientific applications of \panelPomp. +In Section~\ref{sec:meth}, we proceed to demonstrate inference for \code{panelPomp} objects, emphasizing methodological issues arising due to the specific requirements of panel data, in the context of a toy example. + +\section{Implementing mechanistic models: case studies} +\label{sec:examples} + +Developing mechanistic statistical models for new scientific applications requires identifying the essential variables and their functional relationships, and obtaining a satisfactory description of stochasticity in both the measurement process and the system dynamics. +This challenging but valuable exercise is assisted by simulation-based software that permits implementation of a general class of models. +Once a suitable model is found, it provides a testable benchmark for subsequent investigations of the system under study and other comparable systems. +Published models and methods, equipped with data and reproducible source code, are essential to maintain this progressive development. + +Since each unit in a \code{panelPomp} is itself a \code{pomp}, we refer to \citet{king16} for full details of how these are specified, and we focus on the new issues arising in \code{panelPomp}. +Recall from Section~\ref{sec:PanelPOMP} that a \code{panelPomp} is constructed from the list of constituent \code{pomp} models together with a collection of shared and unit-specific parameters. +A unit-specific parameter named \code{theta} should be called \code{theta} in each constituent \code{pomp}. 
+Thus, the value \code{theta[unit7]} specific to unit 7 is just passed as \code{theta} when required by the \code{pomp} model representing this unit. + +Existing examples of \code{panelPomp} analysis have primarily concerned infectious disease dynamics, a topic that has motivated many advances in inference for partially observed stochastic dynamic systems. +We discuss five of these below. +In addition, the close relationship between \code{panelPomp} and \code{pomp} objects means that \code{panelPomp} model constructions can borrow from the considerable existing resources for \pomp. +The remaining examples give some applications in other domains which have been carried out using \code{pomp} models but have extensions to \code{panelPomp} situations. + +\begin{enumerate} +\example{Sexual contacts: behavioral heterogeneity within and between individuals}. +\citet{romero-severson15} developed a PanelPOMP model to investigate a longitudinal prospective survey of sexual contacts, quantifying the roles of behavioral differences between individuals and differences within an individual over time. The \code{contacts()} function in \panelPomp generates one of the models and datasets studied in this paper. The source code to generate this \code{panelPomp} object is at \url{https://github.com/cbreto/panelPomp}. + + +\example{Recurrent infection with HPV}. +\citet{ranjeva17} developed a PanelPOMP model to study the strain dynamics of a longitudinal prospective serological survey of human papillomavirus (HPV). +Their \panelPomp code is available at \url{https://github.com/cobeylab/HPV-model}. + +\example{Polio: asymptomatic infection and local extinction}. +\citet{martinez-bakker15} developed a POMP model for polio transmission to investigate the pre-vaccination epidemics in USA, fitting all parameters separately for each state. +\citet{breto19} found additional precision in inferences when the states are combined into a PanelPOMP with some shared parameters. 
+The \code{polio()} function in \panelPomp generates this model, and the source code is at \url{https://github.com/cbreto/panelPomp}. + +\example{Age-specific differences in the dynamics of protective immunity to influenza}. +\citet{ranjeva19} developed a PanelPOMP model to interpret longitudinal study of serological measurements on human influenza immunity. +The source code for their \panelPomp model is at \url{https://github.com/cobeylab/Influenza-immune-dynamics}. + +\example{The dynamic struggle between malaria and the immune system}. +\citet{wale19} developed a PanelPOMP model to investigate the dynamics of the immune response to malaria, based on flow cytometry time series for a panel of mice under varying treatments. Their \code{panelPomp} source code and data are available at \url{https://doi.org/10.5061/dryad.nk98sf7pk}. + +\example{Ecological predator-prey dynamics: consumptive and non-consumptive effects}. +\citet{marino19} developed a stochastic seasonal predator-prey POMP model to investigate the relationship between an abundant zooplankton species, {\it Daphnia mendotae}, and its predator, {\it Bythotrephes longimanus}, in Lake Michigan. The source code for the \code{pomp} analysis is available at \url{https://doi.org/10.5061/dryad.bh688ft}. + +\example{Stochastic volatility and financial leverage}. + \citet{breto14} demonstrated the applicability of plug-and-play methods within \pomp to investigate stochastic volatility in finance using a POMP model for a single index. + +\end{enumerate} + +Many other POMP models implemented using \code{pomp} are presented at \url{https://kingaa.github.io/pomp/biblio.html}. + + +\section{Methodology for PanelPOMP models} \label{sec:meth} + +All POMP methods can in principle be extended to PanelPOMPs since a PanelPOMP can be written as a POMP. 
+Three different ways to represent a PanelPOMP as a POMP were identified by \citet{romero-severson15}: +(i) the panels can be concatenated temporally into a long time series; (ii) the panels can be adjoined to form a high-dimensional POMP with a latent state comprised of a vector of latent states for each unit; (iii) time in the POMP representation can correspond to unit, $u$, with a vector valued state representing the full process for this unit. +The existence of these representations does not necessarily imply that POMP methods will be computationally feasible on the resulting PanelPOMP. +In particular, sequential Monte Carlo algorithms can have prohibitive scaling difficulties with the high dimensional latent states that can be involved with representations (ii) and (iii). + +Here, we focus on describing and demonstrating the plug-and-play likelihood-based inference workflow used in the scientific examples of Section~\ref{sec:examples}. +This approach builds on likelihood evaluation via the particle filter using \code{pfilter()} and likelihood maximization via iterated filtering using \code{mif2()}. +These algorithms can be formally justified in terms of representation (i) above \citep{breto19}, though the numerical implementation does not in practice have to explicitly construct the concatenation of the \code{panelPomp} object into a \code{pomp} object. + +\subsection{Log likelihood evaluation via panel particle filtering} + +The particle filter, also known as sequential Monte Carlo, is a standard tool for log likelihood evaluation on non-Gaussian POMP models. +The log likelihood function is a central component of Bayesian and frequentist inference. +Due to the dynamic independence assumed between units, particle filtering can be carried out separately on each unit. +The \code{pfilter} method for \code{panelPomp} objects is therefore a direct extension of the \code{pfilter} method for \code{pomp} objects from the \pomp package. 
+Repeating \code{pfilter} is advisable to reduce the Monte Carlo error on the log likelihood evaluation and to quantify this error. +The following code carries out replicated evaluations of the log likelihood of \code{gomp}, taking advantage of multicore computation. +The Gompertz model is convenient for testing methodology for nonlinear non-Gaussian models since it has a logarithmic transformation to a linear Gaussian process and therefore the exact likelihood is computable by the Kalman filter \citep{king16}. + +<>= +pf_results <- foreach(i=1:10) %dopar% pfilter(gomp, + Np=switch(run_level,10,200,1000)) +@ + +<>= +lambda_1 <- logmeanexp(sapply(pf_results,logLik),se=TRUE) +@ + +<>= +lambda_2 <- panel_logmeanexp(sapply(pf_results,unitlogLik), + MARGIN=1,se=TRUE) +@ + +<>= +lambda_exact <- panelGompertzLikelihood(coef(gomp),gomp,coef(gomp)) +@ + +<>= +stew(file=paste0(rda_dir,"pf.rda"),{ +<> +<> +<> + if(!VERBOSE) rm("pf_results") +}) +load(paste0(rda_dir,"pf.rda")) # to access system time +pf_time <- .system.time +@ +This took \Sexpr{myRound(pf_time["elapsed"]/60,2)} minutes using \Sexpr{cores} cores, resulting in a list of objects of class \code{pfilterd.ppomp}. +We can use \code{logLik} to extract the Monte Carlo likelihood estimate $\loglik^{[\nrep]}$ for each replicate $\nrep$, and \code{unitlogLik} to extract the vector of component Monte Carlo likelihood estimates $\loglik^{[\nrep]}_{\unit}$ for each unit $\unit=1,\dots,\Unit$, where $\loglik^{[\nrep]}=\sum_{\unit=1}^{\Unit}\loglik^{[\nrep]}_{\unit}$. +For a POMP model, replicated particle filter likelihood evaluations are usually averaged on the natural scale, rather than the log scale, to take advantage of the unbiasedness of the particle filter likelihood estimate. 
+Thus, we have +\[ +\hat\loglik_1 = \log \frac{1}{\Nrep}\sum_{\nrep=1}^{\Nrep} +\exp +\left\{ + \sum_{\unit=1}^{\Unit} \loglik^{[\nrep]}_{\unit} +\right\} +\] +which can be implemented as +<<,echo=T,eval=F>>= +<> +@ +giving $\hat\loglik_1=\Sexpr{myRound(lambda_1[1],1)}$ with a jack-knife standard error of $\Sexpr{myRound(lambda_1[2],1)}$. +Taking advantage of the independence of the units in the panel structure, \citet{breto19} showed it is preferable to average the replicates of marginal likelihood for each unit before taking a product over units. +This corresponds to +\[ +\hat\loglik_2 = \log \prod_{\unit=1}^\Unit \frac{1}{\Nrep}\sum_{\nrep=1}^{\Nrep} \exp \big\{ \hat \loglik^{[\nrep]}_{\unit} \big\} +\] +which can be implemented as +<<,echo=T,eval=F>>= +<> +@ +giving $\hat\loglik_2=\Sexpr{myRound(lambda_2[1],1)}$ with a jack-knife standard error of $\Sexpr{myRound(lambda_2[2],1)}$. +For this model, a Kalman filter likelihood evaluation gives an exact answer, $\loglik=\Sexpr{myRound(lambda_exact,1)}$. + +\subsection{Maximum likelihood estimation via Panel Iterated Filtering} \label{sec:pif} + +Iterated filtering algorithms carry out repeated particle filtering operations on an extended version of the model that includes time-varying perturbations of parameters. +At each iteration, the magnitude of the perturbations is decreased, and in a suitable limit the algorithm approaches a local maximum of the likelihood function. +The IF2 iterated filtering algorithm \citep{ionides15} has been used for likelihood-based inference on various POMP models arising in epidemiology and ecology \citep[reviewed by][]{breto18}, superseding the previous IF1 algorithm of \citet{ionides06}. +IF2 is implemented in \pomp as the \code{mif2} method for class \code{pomp}. +A panel iterated filtering (PIF) algorithm, extending IF2 to panel data, was developed by \citet{breto19}. 
+An implementation of PIF in \panelPomp is provided by the \code{mif2} method for class \code{panelPomp}, following the pseudocode in Algorithm~\ref{alg:pif}. +The pseudocode in Algorithm~\ref{alg:pif} sometimes omits explicit specification of ranges over which variables are to be computed when this is apparent from the context: +it is understood that $\np$ takes values in $\seq{1}{\Np}$, $\nshared$ in +$\seq{1}{\Nshared}$ and $\nspecific$ in $\seq{1}{\Nspecific}$. +The $N[0,1]$ notation corresponds to the construction of independent standard normal random variables, leading to Gaussian perturbations of parameters on a transformed scale. +These perturbations could follow an arbitrary distribution within the theoretical frameworks of IF2 and PIF. + + +\begin{algorithm}[t!] + \caption{ +% \textbf{Panel iterated filtering}. + \texttt{mif2$\big($pp, Nmif{\argequals}$M$, Np{\argequals}$J$, +start{\argequals}$(\shared^0_{\nshared},\specific^0_{\nspecific,\unit})$, +rw\_sd{\argequals}$(\sigma^\Shared_{\nshared,\time},\sigma^\Specific_{\nspecific,\unit,\time})$, +cooling.factor.50{\argequals}$\rho^{50}\big)$}, where \code{pp} is a \code{panelPomp} object containing data and defined \code{rprocess}, \code{dmeasure}, \code{rinit} and \code{partrans} components. 
+ \label{alg:pif} + } +\noindent\begin{tabular}{ll} +{\bf input:}\rule[-1.5mm]{0mm}{6mm} +& Data, $y_{\unit,n}^*$, $u$ in $\seq{1}{U}$, $n$ in $\seq{1}{N}$\\ +& Simulator of initial density, $f_{X_{\unit,0}}(x_{\unit,0}\giventh \shared,\specific_{\unit})$ \\ +& Simulator of transition density, $f_{X_{\unit,n}|X_{\unit,n-1}}(x_{\unit,n}\given x_{\unit,n-1}\giventh \shared,\specific_{\unit})$ \\ +& Evaluator of measurement density, $f_{Y_{\unit,n}|X_{\unit,n}}(y_{\unit,n}\given x_{\unit,n}\giventh\shared,\specific_{\unit})$ \\ +& Number of particles, $J$, and number of iterations, $\Nmif$\\ +& Starting shared parameter swarm, $\Shared^0_{\nshared,\np}=\shared^0_{\nshared}$, $a$ in $\seq{1}{A}$, $j$ in $\seq{1}{J}$\\ +& Starting unit-specific parameter swarm, $\Specific^0_{\nspecific,\unit,\np}=\specific^0_{\nspecific,\unit}$, $b$ in $\seq{1}{B}$, $j$ in $\seq{1}{J}$\\ +& Random walk intensities, +$\sigma^\Shared_{\nshared,\time}$ and $\sigma^\Specific_{\nspecific,\unit,\time}$ \\ +& Parameter transformations, $h^{\Shared}_{\nshared}$ and $h^{\Specific}_{\nspecific}$, with inverses + $\big(h^{\Shared}_{\nshared}\big)^{-1}$ and $\big(h^{\Specific}_{\nspecific}\big)^{-1}$ +\\ +{\bf output:}\rule[-1.5mm]{0mm}{6mm} +& Final parameter swarm, $\Shared^M_{\nshared,\np}\;$ and $\;\Specific^M_{\nspecific,\unit,\np}$ +\rule[-2mm]{0mm}{5mm} +\end{tabular} + +\hrule + +\noindent\begin{tabular}{l} +For $\nmif$ in $1\mycolon M$\rule[0mm]{0mm}{5mm}\\ +\asp $\Shared^m_{\nshared,0,\np}=\Shared^{m-1}_{\nshared,\np}$ +% and $\Specific^m_{\nspecific,\unit,0,\np}=\Specific^{m-1}_{\nspecific,\unit,\np}$ +\mystretch\\ +\asp For $\unit$ in $1\mycolon \Unit$\\ +\asp \asp $\Shared^{F,m}_{\nshared,\unit,0,j} =\big(h^{\Shared}_{\nshared}\big)^{-1} + \left( + h^\Shared_\nshared \big(\Shared^{m}_{\nshared,\unit-1,\np}\big)+ + \rho^{m}\sigma^{\Shared}_{\nshared,0} Z^{\Shared,m}_{\nshared,\unit,0,\np} + \right)$ for $ Z^{\Shared,m}_{\nshared,\unit,0,\np}\sim N[0,1]$ \myBigStretch\\ +\asp \asp 
$\Specific^{F,m}_{\nspecific,\unit,0,j} =\big(h^{\Specific}_{\nspecific}\big)^{-1} + \left( + h^\Specific_\nspecific \big(\Specific^{m-1}_{\nspecific,\unit,\np}\big)+ + \rho^{m}\sigma^{\Specific}_{\nspecific,u,0} Z^{\Specific,m}_{\nspecific,\unit,0,\np} + \right)$ for $ Z^{\Specific,m}_{\nspecific,\unit,0,\np}\sim N[0,1]$ \myBigStretch\\ +\asp \asp $X_{\unit,0,j}^{F,m}\sim f_{X_{\unit,0}} + \left( + x_{\unit,0} \; \giventh \; \Shared^{F,m}_{\nshared,\unit,0,j} , + \Specific^{F,m}_{\nspecific,\unit,0,j} + \right)$ \myBigStretch\\ +\asp \asp For $n$ in $1\mycolon N_{\unit}$\\ +\asp \asp \asp $\Shared^{P,m}_{\nshared,\unit,\time,j} =\big(h^{\Shared}_{\nshared}\big)^{-1} + \left( + h^\Shared_\nshared \big(\Shared^{F,m}_{\nshared,\unit,\time-1,\np}\big)+ + \rho^{m}\sigma^{\Shared}_{\nshared,\time} Z^{\Shared,m}_{\nshared,\unit,\time,\np} + \right)$ for $ Z^{\Shared,m}_{\nshared,\unit,\time,\np}\sim N[0,1]$ \myBigStretch\\ +\asp \asp \asp $\Specific^{P,m}_{\nspecific,\unit,\time,j} =\big(h^{\Specific}_{\nspecific}\big)^{-1} + \left( + h^\Specific_\nspecific \big(\Specific^{F,m}_{\nspecific,\unit,\time-1,\np}\big)+ + \rho^{m}\sigma^{\Specific}_{\nspecific,u,\time} Z^{\Specific,m}_{\nspecific,\unit,\time,\np} + \right)$ for $ Z^{\Specific,m}_{\nspecific,\unit,\time,\np}\sim N[0,1]$ \myBigStretch\\ +\asp \asp \asp $X_{\unit,n,j}^{P,m}\sim f_{X_{\unit,n}|X_{\unit,n-1}} + \left( + x_{\unit,n} \; \big| \; X^{F,m}_{\unit,n-1,j} \; \giventh \; + \Shared^{P,m}_{\nshared,\unit,\time,j} \, , + \Specific^{P,m}_{\nspecific,\unit,\time,j} + \right)$ \myBigStretch\\ +\asp \asp \asp $w_{\unit,n,j}^m = f_{Y_{\unit,n}|X_{\unit,n}} + \left(y^*_{\unit,n} \; \big| \; X_{\unit,n,j}^{P,m} \; \giventh \; + \Shared^{P,m}_{\nshared,\unit,\time,j} \, , + \Specific^{P,m}_{\nspecific,\unit,\time,j} + \right)$ \myBigStretch\\ +\asp \asp \asp Draw $k_{1:J}$ with $\prob(k_j=i)= w_{\unit,n,i}^m\Big/\sum_{q=1}^J w_{\unit,n,q}^m$ \myBigStretch \\ +\asp \asp \asp 
$\Shared^{F,m}_{\nshared,\unit,\time,\np}=\Shared^{P,m}_{\nshared,\unit,\time,k_{\np}}$, $\;\; \Specific^{F,m}_{\nspecific,\unit,\time,\np}=\Specific^{P,m}_{\nspecific,\unit,\time,k_{\np}} \;$ +and $\; X^{F,m}_{\unit,n,j}=X^{P,m}_{\unit,n,k_j}$ \mystretch\\ +\asp \asp End For \\ %%% end n loop +\asp \asp $\Shared^{m}_{\nshared,\unit,\np}=\Shared^{F,m}_{\nshared,\unit,N_{\unit},\np}$ and + $\Specific^{m}_{\nspecific,\unit,\np}=\Specific^{F,m}_{\nspecific,\unit,N_{\unit},\np}$ + \\ +\asp End For \\ %%% end u loop +\asp $\Shared^{m}_{\nshared,\np}=\Shared^m_{\nshared,\Unit,\np}$ +\\ +End For \\ %%% end m loop +\end{tabular} +\end{algorithm} + +At a conceptual level, the PIF algorithm has an evolutionary analogy: successive iterations mutate parameters and select among the fittest outcomes measured by Monte Carlo likelihood evaluation. +The theory allows considerable flexibility in how the parameters are perturbed, but Gaussian perturbations on an appropriate scale are typically adequate. +Most often, the perturbation parameters +$\sigma^\Shared_{\nshared,\time}$ and $\sigma^\Specific_{\nspecific,\unit,\time}$ in Algorithm~\ref{alg:pif} will not depend on $\time$. +For parameters set to have uncertainty on a unit scale, the value 0.02 demonstrated here has been commonly used. +The help documentation on the \code{rw\_sd} argument gives instruction on using additional structure should it become necessary. + +For positive parameters, a logarithmic transform can achieve both tasks of removing the boundary and placing uncertainty on a unit scale. +For the panel Gompertz model, all the parameters are non-negative valued, and so the \code{panelGompertz()} code calls \code{panelPomp} with an argument +<<,eval=F,echo=T>>= +partrans=parameter_trans(log=c("K","r","sigma","tau","X.0")) +@ +Inference methodology can call \code{partrans(...,dir="toEst")} to work with parameters on a suitable scale, usually one where additive variation is meaningful. 
+The methodology can revert to the original parameterization, presumably chosen to be scientifically convenient or meaningful, using \code{partrans(...,dir="fromEst")}.
+Thus, a user who does not look `under the hood' never has to be directly concerned with parameters on the transformed scale, beyond assigning the transformation.
+
+For Monte Carlo maximization, replication from diverse starting points is recommended.
+We demonstrate such a maximization search on \code{gomp}.
+For simplicity, we fix $K_{\unit}=1$ and the initial condition $X_{\unit,0}=1$, maximizing over two shared parameters, $r$ and $\sigma$, and one unit-specific parameter $\tau_\unit$.
+To define the diverse starting points, we make uniform draws from a specified box.
+We are not promising that the search will stay within this box, and indeed we should be alert to the possibility that the data lead us elsewhere.
+However, if replicated searches started from this box reliably reach a consensus, we claim we have carefully investigated this part of parameter space.
+A larger box leads to greater confidence that the relevant part of the parameter space has been searched, at the expense of requiring additional work.
+The \code{runif\_panel\_design} function constructs a matrix of random draws from the box. 
+<>= +starts <- runif_panel_design( + lower = c('r' = 0.05, 'sigma' = 0.05, 'tau' = 0.05, 'K' = 1, 'X.0' = 1), + upper = c('r' = 0.2, 'sigma' = 0.2, 'tau' = 0.2, 'K' = 1, 'X.0' = 1), + specific_names = c('K', 'tau', 'X.0'), + unit_names = names(gomp), + nseq=switch(run_level,2,4,6) +) +@ + +We then carry out a search from each starting point: + +<>= +mif_results <- foreach(start=iter(starts,"row")) %dopar% { + mif2(gomp, start=unlist(start), + Nmif = switch(run_level,2,20,150), + Np = switch(run_level,10,500,1500), + cooling.fraction.50=0.5, + cooling.type="geometric", + transform=TRUE, + rw.sd=rw_sd(r=0.02,sigma=0.02,tau=0.02) + ) +} +@ + +<>= +stew(file=paste0(rda_dir,"mif.rda"),{ +<> +}) +load(file=paste0(rda_dir,"mif.rda")) +mif_time <- .system.time +@ + +This took \Sexpr{myRound(mif_time["elapsed"]/60,2)} minutes using \Sexpr{cores} cores, producing a list of objects of class \code{mifd.ppomp}. +The algorithmic parameters are very similar to those of the \code{mif2} method for class \code{pomp}. +The perturbations, determined by the \code{rw\_sd} argument, may be a list giving separate instructions for each unit. +When only one specification for a unit-specific parameter is given (as we do for $K_\unit$ here) the same perturbation is used for all units. + +We can check on convergence of the searches, and possibly diagnose improvements in the choices of algorithmic parameters, by consulting trace plots of the searches available via the \code{traces} method for class \code{mifd.ppomp}. +This follows recommendations by \citep{ionides06} and \citep{king16}. + +An issue characteristic of PanelPOMP models is using the panel structure to facilitate the large number of parameters arising when unit-specific parameters are specified for a large number of units. +For a fixed value of the shared parameters, the likelihood of the unit-specific parameters factorizes over the units. 
+The factorized likelihood can be maximized separately over each unit, replacing a challenging high-dimensional problem with many relatively routine low-dimensional problems. +This suggests a block maximization strategy where unit-specific parameters for each unit are maximized as a block. +\citet{breto19} used a simple block strategy where a global search over all parameters is followed by a block maximization over units for unit-specific parameters. +We demonstrate this here, refining each of the maximization replicates above. +The following function carries out a maximization search of unit-specific parameters for a single unit. +The call to \code{mif2} takes advantage of argument recycling: all algorithmic parameters are re-used from the construction of \code{mifd\_gomp} except for the respecified random walk standard deviations which ensures that only the unit-specific parameters are perturbed. + +<>= +mif_unit <- function(unit,mifd_gomp,reps=switch(run_level,2,4,6)){ + unit_gomp <- unit_objects(mifd_gomp)[[unit]] + mifs <- replicate(n=reps,mif2(unit_gomp,rw.sd=rw_sd(tau=0.02))) + best <- which.max(sapply(mifs,logLik)) + coef(mifs[[best]])["tau"] +} +@ + +Now we apply this block maximization to find updated unit-specific parameters for each replicate, and we insert these back into the \code{panelPomp} + +<>= +mif_block <- foreach(mf=mif_results) %dopar% { + mf@specific["tau",] <- sapply(1:length(mf),mif_unit,mifd_gomp=mf) + mf +} +@ + +<>= +stew(file=paste0(rda_dir,"block.rda"),{ +<> +}) +load(file=paste0(rda_dir,"block.rda")) +block_time <- .system.time +@ + +This took \Sexpr{myRound(block_time["elapsed"]/60,2)} minutes. + +We expect Monte Carlo estimates of the maximized log likelihood functions to fall below the actual (usually unknown) value. 
+This is in part because imperfect maximization can only reduce the maximized likelihood, and in part a consequence of Jensen's inequality applied to the likelihood evaluation: the unbiased SMC likelihood evaluation has a negative bias on estimation of the log likelihood. + +\subsection{Monte Carlo profile likelihood} + +The profile likelihood function is constructed by fixing one focal parameter at a range of values and then maximizing the likelihood over all other parameters for each value of the focal parameter. +Constructing a profile likelihood function has several practical advantages. +\begin{enumerate} +\item Evaluations at neighboring values of the focal parameter provide additional Monte Carlo replication. Typically, the true profile log likelihood is smooth, and asymptotically close to quadratic under regularity conditions, so deviations from a smooth fitted line can be interpreted as Monte Carlo error. +\item Large-scale features of the profile likelihood reveal a region of the parameter space outside which the model provides a poor explanation of the data. +\item Co-plots, showing how the values of other maximized parameters vary along the profile, may provide insights into parameter tradeoffs implied by the data. +\item The smoothed Monte Carlo profile log likelihood can be used to construct an approximate 95\% confidence interval. +The resulting confidence interval can be properly adjusted to accommodate both statistical and Monte Carlo uncertainty \citep{ionides17}. +\end{enumerate} + +Once we have code for maximizing the likelihood, only minor adaptation is needed to carry out the maximizations for a profile. +The \code{runif\_panel\_design} generating the starting values is replaced by a call to \code{profile\_design}, which assigns the focal parameter to a grid of values and randomizes the remaining parameters. 
+The random walk standard deviation for the focal parameter is unassigned, which leads it to be set to zero and therefore the parameter remains fixed during the maximization process. +The following code combines the joint and block maximizations developed above. + + +<>= +# Names of the estimated parameters +estimated <- c( + "r", "sigma", paste0("tau[unit", 1:length(gomp), "]") +) + +# Names of the fixed parameters (not estimated) +fixed <- names(coef(gomp))[!names(coef(gomp)) %in% estimated] + +profile_starts <- profile_design( + r = seq(0.05, 0.2, length = switch(run_level, 10, 10, 20)), + lower = c(coef(gomp)[estimated] / 2, coef(gomp)[fixed])[-1], + upper = c(coef(gomp)[estimated] * 2, coef(gomp)[fixed])[-1], + nprof = 2, type = "runif" +) + +profile_results <- foreach(start=iter(profile_starts,"row")) %dopar% { + mf <- mif2( + mif_results[[1]], + start = unlist(start), + rw.sd = rw_sd(sigma = 0.02, tau = 0.02)) + mf@specific["tau", ] <- sapply(1:length(mf), mif_unit,mifd_gomp = mf) + mf +} +@ + +<>= +stew(file=paste0(rda_dir,"profile.rda"),{ +<> +}) +load(file=paste0(rda_dir,"profile.rda")) +profile_time <- .system.time +@ + + +The profile searches took \Sexpr{myRound(profile_time["elapsed"]/60,2)} minutes. +However, we are not quite done gathering the results for the profile. +The perturbed filtering carried out by \code{mif2} leads to an approximate likelihood evaluation, but for our main results it is better to re-evaluate the likelihood without perturbations. +Also, replication is recommended to reduce and quantify Monte Carlo error. +We do this, and tabulate the results. 
+ +<>= +profile_table <- foreach(mf=profile_results,.combine=rbind) %dopar% { + LL <- replicate(switch(run_level,2,5,10), + logLik(pfilter(mf,Np=switch(run_level,10,500,2500))) + ) + LL <- logmeanexp(LL,se=TRUE) + data.frame(t(coef(mf)),loglik=LL[1],loglik.se=LL[2]) +} +@ + + +<>= +stew(file=paste0(rda_dir,"profile_table.rda"),{ +<> +}) +load(file=paste0(rda_dir,"profile_table.rda")) +profile_table_time <- .system.time +@ + + +The likelihood evaluations took \Sexpr{myRound(profile_table_time["elapsed"]/60,2)} minutes. +It is appropriate to spend comparable time evaluating the likelihood to the time spent maximizing it: a high quality maximization without high quality likelihood evaluation is hard to interpret, whereas good evaluations of the likelihood in a vicinity of the maximum can inform about the shape of the likelihood surface in this region which may be as relevant as knowing the exact maximum. + +The Monte Carlo adjusted profile (MCAP) approach of \citet{ionides17} is implemented by the \code{mcap()} function in \pomp. +This function constructs a smoothed profile likelihood, by application of the \code{loess} smoother. +It computes a local quadratic approximation that is used to derive an extension to the classical profile likelihood confidence interval that makes allowance for Monte Carlo error in the calculation of the profile points. +Theoretically, an MCAP procedure can obtain statistically efficient confidence intervals even when the Monte Carlo error in the profile likelihood is asymptotically growing and unbounded \citep{ning21}. +Log likelihood evaluation has negative bias, as a consequence of Jensen's inequality for an unbiased likelihood estimate. +This bias produces a vertical shift in the estimated profile, which fortunately does not have consequence for the confidence interval if the bias is slowly varying. 
+ +<>= +profile_r <- unique(profile_starts[,"r"]) +start_matrix <- matrix(coef(gomp),ncol=length(coef(gomp)), + nrow=length(profile_r),byrow=T,dimnames=list(NULL,names(coef(gomp)))) +start_matrix[,"r"] <- profile_r +estNames <- c("sigma",paste0("tau[unit",1:length(gomp),"]")) +stew(file=paste0(rda_dir,"profile_kalman.rda"),{ + exact_profile <- foreach(start=iter(start_matrix,"row")) %dopar% { + x <- as.vector(start) + names(x) <- colnames(start) + optim( + par=x[estNames], + fn=panelGompertzLikelihood, + panelPompObject=gomp, + params=x, + hessian=FALSE, + control=list(trace=0,fnscale=-1) + ) + } +}) +load(file=paste0(rda_dir,"profile_kalman.rda")) +exact_profile_time <- .system.time +exact_profile_table <- cbind(r=profile_r, + logLik=sapply(exact_profile,function(x) x$value)) +@ + +The profile points evaluated above, and stored in \code{profile\_table}, can be used to compute a 95\% MCAP confidence interval as follows: +<>= +gomp_mcap <- pomp::mcap(logLik=profile_table$loglik, + parameter=profile_table$r, + level=0.95) +@ +The construction of the confidence interval is best shown by a plot of the smoothed profile likelihood (Fig.~\ref{fig:mcap-plot}). +In this toy example, the exact likelihood can be calculated using the Kalman filter, and this is carried out by the \code{panelGompertzLikelihood} function. +The likelihood can then be maximized using a general-purpose optimization procedure such as \code{optim()} in R. +With large numbers of parameters, and no guarantee of convexity, this numerical optimization is not entirely routine. +One might consider a block optimization strategy, but here we carry out a simple global search, which took \Sexpr{myRound(exact_profile_time["elapsed"]/60,2)} minutes to compute the profile likelihood, once parallelized. +The deterministic search is also not entirely smooth, and so we apply MCAP as for the Monte Carlo search. 
+Both deterministic and Monte Carlo optimizations can benefit from a block optimization strategy which alternates between shared and unit-specific parameters \citep{breto19}. +Such algorithms can be built using the \panelPomp functions we have demonstrated, and they will be incorporated into the package once they have been more extensively researched. + +<>= + + par(mai=c(0.7,0.7,0.3,0.3)) + mc <- gomp_mcap + ylim=range(c(mc$fit$smoothed,mc$logLik,exact_profile_table[,"logLik"])) + plot(mc$logLik ~ mc$parameter, xlab="",ylab="",ylim=ylim, col="red") + mtext(side=2,text="profile log likelihood",line=2.5) + mtext(side=1,text="r",line=2.5) + lines(mc$fit$parameter,mc$fit$smoothed, col = "red", lwd = 1.5) + abline(v=mc$ci,col="red") + abline(h=max(mc$fit$smoothed,na.rm=T)-mc$delta,col="red") + +# lines(exact_profile_table[,"r"],exact_profile_table[,"logLik"],lty="dashed") + optim_mcap <- pomp::mcap(logLik=exact_profile_table[,"logLik"], + parameter=exact_profile_table[,"r"], + level=0.95) + mc2 <- optim_mcap + points(mc2$parameter,mc2$logLik,pch=0) + lines(mc2$fit$parameter,mc2$fit$smoothed, col = "black", lty="dashed") + abline(v=mc2$ci,col="black",lty="dashed") + abline(h=max(mc2$fit$smoothed,na.rm=T)-mc2$delta,col="black",lty="dashed") + +@ + +\section{Conclusion} + +The analysis demonstrated in Section~\ref{sec:meth} gives one approach to plug-and-play inference for PanelPOMP models, but the scope of \panelPomp is far from limited to this approach. +\panelPomp is a general and extensible framework which encourages the development of additional functionality. +The \code{panelPomp} class and the corresponding workhorse functions provide an applications interface available to other future methodologies. +In this sense, \panelPomp provides an environment for sharing and developing PanelPOMP models and methods, both via future contributions to the \panelPomp package and via open source applications using \panelPomp. 
+This framework will facilitate comparison of new future methodology with existing methodology.
+
+Likelihood evaluation and maximization were used to construct confidence intervals in Section~\ref{sec:meth}.
+These calculations also provide a foundation for other techniques of likelihood-based inference, such as likelihood ratio hypothesis tests and model selection via Akaike's information criterion (AIC).
+The examples discussed in Section~\ref{sec:examples} provide case studies in the use of these methods for scientific work.
+
+Data analysis using large data sets or complex models may require considerable computing time.
+Simulation-based methodology is necessarily computationally intensive, and access to a cluster computing environment extends the size of problems that can be tackled.
+The workflow in Section~\ref{sec:meth} has a simple parallel structure that can readily take advantage of additional resources.
+Embarrassingly parallel computations, such as computing the profile likelihood function at a grid of points, or replicated evaluations of the likelihood function, can be parallelized using the \pkg{foreach} package.
+
+Panel data is widely available: for many experimental and observational systems it is more practical to collect short time series on many units than to obtain one long time series.
+For time series data, fitting mechanistic models specified as partially observed Markov processes has found numerous applications for formulating and answering scientific hypotheses \citep{breto09,king16}.
+However, there are remarkably few examples in the literature fitting mechanistic nonlinear non-Gaussian partially observed stochastic dynamic models to panel data.
+The \panelPomp package offers opportunities to remedy this situation.
+
+The source code for \panelPomp is at \url{https://github.com/cbreto/panelPomp}. 
+Unit tests that cover 100\% of the code are provided at \url{https://github.com/cbreto/panelPomp/tests}, and these tests also provide useful examples of calls to the functions within \panelPomp.
+The source code for this article is at \url{https://github.com/cbreto/panelPomp/vignettes/articles/package_tutorial}.
+
+\section*{Acknowledgments}
+The results in this paper were obtained using \R~\Sexpr{paste(R.Version()[6:7], collapse = ".")} with \pkg{panelPomp}~\Sexpr{packageVersion("panelPomp")} and \pkg{pomp}~\Sexpr{packageVersion("pomp")}.
+This work was supported by National Science Foundation grants DMS-1761603 and DMS-1646108, and National Institutes of Health grants 1-U54-GM111274 and 1-U01-GM110712.
+
+\bibliographystyle{jasa3}
+
+% References
+\bibliography{bg}
+\end{document} diff --git a/vignettes/articles/package_tutorial/tutorial.pdf b/vignettes/articles/package_tutorial/tutorial.pdf new file mode 100644 index 0000000..bd8f972 Binary files /dev/null and b/vignettes/articles/package_tutorial/tutorial.pdf differ diff --git a/vignettes/articles/package_tutorial/tutorial.tex b/vignettes/articles/package_tutorial/tutorial.tex new file mode 100644 index 0000000..17ae57c --- /dev/null +++ b/vignettes/articles/package_tutorial/tutorial.tex @@ -0,0 +1,955 @@ +\documentclass[12pt]{article}\usepackage[]{graphicx}\usepackage[table]{xcolor}
+% maxwidth is the original width if it is less than linewidth
+% otherwise use linewidth (to make sure the graphics do not exceed the margin)
+\makeatletter
+\def\maxwidth{ %
+  \ifdim\Gin@nat@width>\linewidth
+    \linewidth
+  \else
+    \Gin@nat@width
+  \fi
+}
+\makeatother
+
+\definecolor{fgcolor}{rgb}{0.345, 0.345, 0.345}
+\newcommand{\hlnum}[1]{\textcolor[rgb]{0.686,0.059,0.569}{#1}}%
+\newcommand{\hlsng}[1]{\textcolor[rgb]{0.192,0.494,0.8}{#1}}%
+\newcommand{\hlcom}[1]{\textcolor[rgb]{0.678,0.584,0.686}{\textit{#1}}}%
+\newcommand{\hlopt}[1]{\textcolor[rgb]{0,0,0}{#1}}% 
+\newcommand{\hldef}[1]{\textcolor[rgb]{0.345,0.345,0.345}{#1}}% +\newcommand{\hlkwa}[1]{\textcolor[rgb]{0.161,0.373,0.58}{\textbf{#1}}}% +\newcommand{\hlkwb}[1]{\textcolor[rgb]{0.69,0.353,0.396}{#1}}% +\newcommand{\hlkwc}[1]{\textcolor[rgb]{0.333,0.667,0.333}{#1}}% +\newcommand{\hlkwd}[1]{\textcolor[rgb]{0.737,0.353,0.396}{\textbf{#1}}}% +\let\hlipl\hlkwb + +\usepackage{framed} +\makeatletter +\newenvironment{kframe}{% + \def\at@end@of@kframe{}% + \ifinner\ifhmode% + \def\at@end@of@kframe{\end{minipage}}% + \begin{minipage}{\columnwidth}% + \fi\fi% + \def\FrameCommand##1{\hskip\@totalleftmargin \hskip-\fboxsep + \colorbox{shadecolor}{##1}\hskip-\fboxsep + % There is no \\@totalrightmargin, so: + \hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}% + \MakeFramed {\advance\hsize-\width + \@totalleftmargin\z@ \linewidth\hsize + \@setminipage}}% + {\par\unskip\endMakeFramed% + \at@end@of@kframe} +\makeatother + +\definecolor{shadecolor}{rgb}{.97, .97, .97} +\definecolor{messagecolor}{rgb}{0, 0, 0} +\definecolor{warningcolor}{rgb}{1, 0, 1} +\definecolor{errorcolor}{rgb}{1, 0, 0} +\newenvironment{knitrout}{}{} % an empty environment to be redefined in TeX + +\usepackage{alltt} +\usepackage{amsmath} +\usepackage{graphicx} +%\usepackage{enumerate} +\usepackage{url} % not crucial - just used below for the URL +\usepackage{amsfonts} +\usepackage{setspace} +\usepackage{caption} +\usepackage{subcaption} +\usepackage[ruled,noline]{algorithm2e} +\usepackage[table]{xcolor} +\usepackage{mathtools} +\usepackage{authblk} +\usepackage{xspace} +\usepackage{natbib} +\DeclarePairedDelimiter\ceil{\lceil}{\rceil} +\DeclarePairedDelimiter\floor{\lfloor}{\rfloor} +\DeclareMathOperator*{\argmax}{arg\,max} + +%\pdfminorversion=4 +% NOTE: To produce blinded version, replace "0" with "1" below. 
+\newcommand{\blind}{0} + +\newcommand{\proglang}[1]{\textsf{#1}} +\newcommand\figTitle{\bf} +\newcommand\pkg{\texttt} +\newcommand\code{\texttt} +\newcommand\eic[1]{\textcolor{orange}{[EI: #1]}} +\newcommand\jwc[1]{\textcolor{brown}{[JW: #1]}} +\newcommand\TODO[1]{\textcolor{red}{[TODO: #1]}} +\newcommand\allVar{\psi} +\newcommand\allVarExt{\bar{\psi}} +\newcommand\improvedCell{\cellcolor{blue!25}} +\newcommand\R{\texttt{R}\xspace} +\newcommand\panelPomp{\texttt{panelPomp}\xspace} +\newcommand\pomp{\texttt{pomp}\xspace} +\newcommand\nimble{\texttt{nimble}\xspace} +\newcommand\mcstate{\texttt{mcstate}\xspace} +\newcommand\spatPomp{\texttt{spatPomp}\xspace} +\newcommand\PanelPOMP{PanelPOMP} +\newcommand\example[1]{ \item #1} + +% PSUEDO-CODE HELPERS +\newcommand\mycolon{{\hspace{0.6mm}:\hspace{0.6mm}}} +\newcommand\np{j} +\newcommand\Np{J} % Number of particles +\newcommand\nrep{i} % Replicate index (in likelihood evaluations) +\newcommand\Nrep{I} % Number of replicates (in likelihood evaluations) +\newcommand\unit{u} % Unit index +\newcommand\Unit{U} % Number of panel units +\renewcommand\time{n} +\newcommand\Time{N} +\newcommand\Nmif{M} % Number of iterated filtering steps +\newcommand\nmif{m} % Mif index +\newcommand\Shared{\Phi} +\newcommand\Specific{\Psi} +\newcommand\shared{\phi} +\newcommand\specific{\psi} +\newcommand\Nshared{A} +\newcommand\nshared{a} +\newcommand\Nspecific{B} +\newcommand\nspecific{b} +\newcommand\dimSpecific{{\mathrm{dim}(\Psi)}} +\newcommand\dimShared{{\mathrm{dim}(\Phi)}} +\newcommand\argequals{{\,=\,}} +\newcommand\mystretch{\rule[-2mm]{0mm}{5mm} } +\newcommand\myBigStretch{\rule[-3mm]{0mm}{5mm} } +\newcommand\asp{\hspace{6mm}} + +% MATH HELPERS +\newcommand\RealSpace{\mathbb{R}} +\newcommand\loglik{\lambda} +\newcommand\lik{\ell} +\newcommand\Xspace{{\mathbb X}} +\newcommand\Yspace{{\mathbb Y}} +\newcommand\Rspace{{\mathbb R}} +\newcommand\hatTheta{\widehat{\Theta}} +\newcommand\Xdim{{\mathrm{dim}}(\Xspace)} 
+\newcommand\Ydim{{\mathrm{dim}}(\Yspace)}
+%\newcommand\Thetadim{{{\mathrm{dim}}(\Theta)}}
+\newcommand\Thetadim{D}
+\newcommand\contactsRate{\Lambda}
+\newcommand\given{{\, | \,}}
+\newcommand\giventh{{\,;\,}}
+\newcommand\normal{\mathrm{Normal}}
+\newcommand\seq[2]{{#1}\!:\!{#2}}
+\newcommand\prob{\mathbb{P}}
+
+% MARGINS
+\addtolength{\oddsidemargin}{-.5in}%
+\addtolength{\evensidemargin}{-1in}%
+\addtolength{\textwidth}{1in}%
+\addtolength{\textheight}{1.7in}%
+\addtolength{\topmargin}{-1in}%
+
+\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
+\begin{document}
+
+%\bibliographystyle{natbib}
+
+\def\spacingset#1{\renewcommand{\baselinestretch}%
+{#1}\small\normalsize} \spacingset{1}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% \if0\blind
+% {
+% \title{\bf Likelihood Based Inference for ARMA Models}
+% \author{
+% Carles Bret\'o, Universitat de Val\`encia\\
+% Jesse Wheeler, University of Michigan\\
+% Edward L. Ionides, University of Michigan\\
+% Aaron A. King, University of Michigan}
+% \maketitle
+% } \fi
+
+\if0\blind
+{
+ \title{\bf A tutorial on panel data analysis using partially observed Markov processes via the \R package \panelPomp}
+
+ \author[1]{Carles Bret\'o}
+ \author[2]{Jesse Wheeler}
+ \author[2]{Edward L. Ionides}
+ \author[2]{Aaron A. King}
+
+ \affil[1]{Universitat de Val\`encia}
+ \affil[2]{University of Michigan}
+
+ \maketitle
+} \fi
+
+\if1\blind
+{
+ \bigskip
+ \bigskip
+ \bigskip
+ \begin{center}
+ {\Large\bf A tutorial on panel data analysis using partially observed Markov processes via the \R package \panelPomp}
+\end{center}
+ \medskip
+} \fi
+
+\bigskip
+\begin{abstract}
+\noindent The \R package \panelPomp supports analysis of panel data via a general class of partially observed Markov process models (PanelPOMP). 
+We present a tutorial for this package, describing how the mathematical concept of a PanelPOMP is represented in the software, as well as a demonstration of typical use-cases of \panelPomp. +Monte Carlo methods used for POMP models require adaptation for PanelPOMP models due to the higher dimensionality of panel data. +The package takes advantage of recent advances for PanelPOMP, including an iterated filtering algorithm, Monte Carlo adjusted profile methodology and block optimization methodology to assist with the large parameter spaces that can arise with panel models. +In addition, tools for manipulation of models and data are provided that take advantage of the panel structure. +\end{abstract} + +\vfill + + + + +\newpage +\spacingset{1.25} + +\AtBeginEnvironment{algorithm}{% + \singlespacing + % \renewcommand{\arraystretch}{1}% +} + +\section{Introduction} +\label{sec:intro} + +This tutorial describes a typical use-case of the \panelPomp \R package. +Partially observed Markov process (POMP) models---also known as state-space or hidden Markov models---are useful mathematical tools for modeling non-linear dynamic systems. +POMP models describe a system via an unobserved dynamic model that has the Markov property, coupled with a model for how observations are drawn from the latent process. +Various software packages provide platforms for performing statistical analysis of these systems using POMP models (for instance, \pomp \citep{king16}, \spatPomp \citep{asfaw21}, \nimble \citep{michaud21}, and \mcstate \citep{fitzjohn20}). +However, particular challenges arise when modeling \emph{panel data} via POMP models; these data arise when time series are measured on a collection of independent units. +While each unit may be modeled separately, analyzing the data as a single collection can provide insights into the underlying dynamical system that may not be obtained otherwise. 
+For instance, each time series in the panel may be too short to infer a complex dynamical model, so that inference on an underlying model must combine information across the units. +\panelPomp is, to our knowledge, the first software package specifically addressing these issues. +The utility of \panelPomp has been demonstrated in several scientific applications \citep{ranjeva17,ranjeva19,wale19,domeyer22,lee20}. + +The current version of \panelPomp emphasizes plug-and-play methods \citep{breto09,he10}, also known as likelihood-free \citep{marjoram03,sisson07}, that are applicable to dynamic models for which a simulator is available even when the transition densities are unavailable. +This class of flexible algorithms allow researchers to build their models based on scientific reasoning rather than statistical convenience, typically at the expense of computational efficiency. +In the following sections, this tutorial demonstrates how the \panelPomp package can be used to model nonlinear dynamic systems using plug-and-play methodologies. + +\section{PanelPOMP models} \label{sec:PanelPOMP} + +Panel data is a collection of multiple time series datasets, each possibly multivariate on its own, where each time series is associated with a unit; the units can represent spatial locations, agents in a system, or other units for which data is collected over time. +For convenience of identifying units in the panel, we use numeric labels $\{1,2,\dots,\Unit\}$, which we also write as $1\mycolon\Unit$. +Time series from each unit may be of different lengths, and so we define $N_\unit$ as the number of measurements collected on unit $\unit$. +The observations are modeled as a realization of a stochastic process $Y_{\unit,1:N_\unit}$, observed at times $t_{\unit,1}