From 96738023f526ed9d6aa2131da998f3940dd2b48f Mon Sep 17 00:00:00 2001 From: Simon Wood Date: Wed, 22 Jul 2015 00:00:00 +0000 Subject: [PATCH] version 1.8-7 --- DESCRIPTION | 6 +- MD5 | 55 +- NAMESPACE | 24 +- R/bam.r | 579 +++++- R/efam.r | 46 +- R/fast-REML.r | 345 +++- R/gam.fit3.r | 237 ++- R/gam.fit4.r | 75 +- R/gamm.r | 5 +- R/jagam.r | 34 +- R/mgcv.r | 120 +- R/misc.r | 88 +- R/smooth.r | 72 +- changeLog | 49 + inst/CITATION | 0 inst/po/po/LC_MESSAGES/R-mgcv.mo | Bin 23123 -> 0 bytes inst/po/po/LC_MESSAGES/mgcv.mo | Bin 2576 -> 0 bytes man/Predict.matrix.Rd | 0 man/Predict.matrix.cr.smooth.Rd | 0 man/Predict.matrix.soap.film.Rd | 0 man/Rrank.Rd | 0 man/anova.gam.Rd | 0 man/bam.Rd | 17 +- man/cSplineDes.Rd | 0 man/choose.k.Rd | 0 man/exclude.too.far.Rd | 0 man/extract.lme.cov.Rd | 0 man/fix.family.link.Rd | 0 man/fixDependence.Rd | 0 man/formXtViX.Rd | 0 man/fs.test.Rd | 0 man/full.score.Rd | 0 man/gam.check.Rd | 0 man/gam.control.Rd | 4 +- man/gam.convergence.Rd | 0 man/gam.fit.Rd | 0 man/gam.fit3.Rd | 0 man/gam.models.Rd | 0 man/gam.outer.Rd | 0 man/gam.scale.Rd | 13 +- man/gam.selection.Rd | 0 man/gam.side.Rd | 0 man/gam2objective.Rd | 0 man/gaulss.Rd | 0 man/get.var.Rd | 0 man/inSide.Rd | 0 man/influence.gam.Rd | 0 man/initial.sp.Rd | 4 +- man/jagam.Rd | 10 +- man/ldTweedie.Rd | 0 man/linear.functional.terms.Rd | 0 man/magic.Rd | 0 man/magic.post.proc.Rd | 0 man/mgcv-FAQ.Rd | 0 man/model.matrix.gam.Rd | 0 man/mono.con.Rd | 0 man/mroot.Rd | 0 man/negbin.Rd | 0 man/new.name.Rd | 0 man/notExp.Rd | 0 man/notExp2.Rd | 0 man/null.space.dimension.Rd | 0 man/ocat.Rd | 0 man/pcls.Rd | 0 man/pdIdnot.Rd | 0 man/pdTens.Rd | 0 man/pen.edf.Rd | 0 man/place.knots.Rd | 0 man/polys.plot.Rd | 0 man/print.gam.Rd | 0 man/qq.gam.Rd | 0 man/rTweedie.Rd | 0 man/random.effects.Rd | 0 man/residuals.gam.Rd | 0 man/s.Rd | 0 man/scat.Rd | 0 man/smooth.construct.Rd | 0 man/smooth.construct.ad.smooth.spec.Rd | 0 man/smooth.construct.cr.smooth.spec.Rd | 0 man/smooth.construct.fs.smooth.spec.Rd | 0 man/smooth.construct.ps.smooth.spec.Rd | 0 man/smooth.construct.so.smooth.spec.Rd | 0 man/smooth.construct.t2.smooth.spec.Rd | 0 man/smooth.construct.tensor.smooth.spec.Rd | 0 man/smooth.construct.tp.smooth.spec.Rd | 0 man/smooth.terms.Rd | 0 man/spasm.construct.Rd | 0 man/summary.gam.Rd | 0 man/te.Rd | 0 man/tensor.prod.model.matrix.Rd | 0 man/uniquecombs.Rd | 0 man/vcov.gam.Rd | 0 po/R-de.po | 148 +- po/R-po.po | 2099 -------------------- po/de.po | 4 +- po/po.po | 140 -- src/Makevars | 0 src/discrete.c | 441 ++++ src/gdi.c | 41 +- src/init.c | 13 +- src/mat.c | 231 ++- src/mgcv.h | 20 +- src/misc.c | 21 +- src/qp.h | 0 src/soap.c | 0 105 files changed, 2245 insertions(+), 2696 deletions(-) mode change 100755 => 100644 inst/CITATION delete mode 100644 inst/po/po/LC_MESSAGES/R-mgcv.mo delete mode 100644 inst/po/po/LC_MESSAGES/mgcv.mo mode change 100755 => 100644 man/Predict.matrix.Rd mode change 100755 => 100644 man/Predict.matrix.cr.smooth.Rd mode change 100755 => 100644 man/Predict.matrix.soap.film.Rd mode change 100755 => 100644 man/Rrank.Rd mode change 100755 => 100644 man/anova.gam.Rd mode change 100755 => 100644 man/cSplineDes.Rd mode change 100755 => 100644 man/choose.k.Rd mode change 100755 => 100644 man/exclude.too.far.Rd mode change 100755 => 100644 man/extract.lme.cov.Rd mode change 100755 => 100644 man/fix.family.link.Rd mode change 100755 => 100644 man/fixDependence.Rd mode change 100755 => 100644 man/formXtViX.Rd mode change 100755 => 100644 man/fs.test.Rd mode change 100755 => 100644 man/full.score.Rd 
mode change 100755 => 100644 man/gam.check.Rd mode change 100755 => 100644 man/gam.convergence.Rd mode change 100755 => 100644 man/gam.fit.Rd mode change 100755 => 100644 man/gam.fit3.Rd mode change 100755 => 100644 man/gam.models.Rd mode change 100755 => 100644 man/gam.outer.Rd mode change 100755 => 100644 man/gam.selection.Rd mode change 100755 => 100644 man/gam.side.Rd mode change 100755 => 100644 man/gam2objective.Rd mode change 100755 => 100644 man/gaulss.Rd mode change 100755 => 100644 man/get.var.Rd mode change 100755 => 100644 man/inSide.Rd mode change 100755 => 100644 man/influence.gam.Rd mode change 100755 => 100644 man/ldTweedie.Rd mode change 100755 => 100644 man/linear.functional.terms.Rd mode change 100755 => 100644 man/magic.Rd mode change 100755 => 100644 man/magic.post.proc.Rd mode change 100755 => 100644 man/mgcv-FAQ.Rd mode change 100755 => 100644 man/model.matrix.gam.Rd mode change 100755 => 100644 man/mono.con.Rd mode change 100755 => 100644 man/mroot.Rd mode change 100755 => 100644 man/negbin.Rd mode change 100755 => 100644 man/new.name.Rd mode change 100755 => 100644 man/notExp.Rd mode change 100755 => 100644 man/notExp2.Rd mode change 100755 => 100644 man/null.space.dimension.Rd mode change 100755 => 100644 man/ocat.Rd mode change 100755 => 100644 man/pcls.Rd mode change 100755 => 100644 man/pdIdnot.Rd mode change 100755 => 100644 man/pdTens.Rd mode change 100755 => 100644 man/pen.edf.Rd mode change 100755 => 100644 man/place.knots.Rd mode change 100755 => 100644 man/polys.plot.Rd mode change 100755 => 100644 man/print.gam.Rd mode change 100755 => 100644 man/qq.gam.Rd mode change 100755 => 100644 man/rTweedie.Rd mode change 100755 => 100644 man/random.effects.Rd mode change 100755 => 100644 man/residuals.gam.Rd mode change 100755 => 100644 man/s.Rd mode change 100755 => 100644 man/scat.Rd mode change 100755 => 100644 man/smooth.construct.Rd mode change 100755 => 100644 man/smooth.construct.ad.smooth.spec.Rd mode change 100755 => 100644 man/smooth.construct.cr.smooth.spec.Rd mode change 100755 => 100644 man/smooth.construct.fs.smooth.spec.Rd mode change 100755 => 100644 man/smooth.construct.ps.smooth.spec.Rd mode change 100755 => 100644 man/smooth.construct.so.smooth.spec.Rd mode change 100755 => 100644 man/smooth.construct.t2.smooth.spec.Rd mode change 100755 => 100644 man/smooth.construct.tensor.smooth.spec.Rd mode change 100755 => 100644 man/smooth.construct.tp.smooth.spec.Rd mode change 100755 => 100644 man/smooth.terms.Rd mode change 100755 => 100644 man/spasm.construct.Rd mode change 100755 => 100644 man/summary.gam.Rd mode change 100755 => 100644 man/te.Rd mode change 100755 => 100644 man/tensor.prod.model.matrix.Rd mode change 100755 => 100644 man/uniquecombs.Rd mode change 100755 => 100644 man/vcov.gam.Rd delete mode 100755 po/R-po.po delete mode 100644 po/po.po mode change 100755 => 100644 src/Makevars create mode 100644 src/discrete.c mode change 100755 => 100644 src/qp.h mode change 100755 => 100644 src/soap.c diff --git a/DESCRIPTION b/DESCRIPTION index 260d48f..9722f65 100755 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,5 +1,5 @@ Package: mgcv -Version: 1.8-6 +Version: 1.8-7 Author: Simon Wood Maintainer: Simon Wood Title: Mixed GAM Computation Vehicle with GCV/AIC/REML Smoothness @@ -15,7 +15,7 @@ Suggests: splines, parallel, survival, MASS LazyLoad: yes ByteCompile: yes License: GPL (>= 2) -Packaged: 2015-03-30 13:02:31 UTC; sw283 NeedsCompilation: yes +Packaged: 2015-07-22 17:38:22 UTC; sw283 Repository: CRAN -Date/Publication: 2015-03-31 12:46:38 
+Date/Publication: 2015-07-23 07:05:08 diff --git a/MD5 b/MD5 index e5cd04c..bedcbe9 100644 --- a/MD5 +++ b/MD5 @@ -1,22 +1,22 @@ -2f31dc4982b7fa7c7f08ff3fad94b88d *ChangeLog -2244e729b443669c81a2732d005dd044 *DESCRIPTION +f876a44fccfab3bcf4e8c6a8916f480f *ChangeLog +68b8a0864224d96ff69d755507862f24 *DESCRIPTION eb723b61539feef013de476e68b5c50a *GPL-2 -4d14a8a6cdbe2e25634829667d3251ec *NAMESPACE -bb90db9c74f22d202abc4b0fc562448e *R/bam.r +053c3c177572e1570e63329445eacc14 *NAMESPACE +e99642616897453fa1cab582bc73c84f *R/bam.r 52ea7081e235df890d3e51198bcc3092 *R/coxph.r -a53c7592bb4c10a961f58e6537c87355 *R/efam.r -8cde8b6379034d9eb78caaf1b0d04cf8 *R/fast-REML.r -5b6cfb632772fb514428730b22066480 *R/gam.fit3.r -fc965ed3d5edf72c6f0335347c383dbb *R/gam.fit4.r +209a7a1d10cec589213ffed9efc1d5cc *R/efam.r +9131483dd63ef2b80e8159601cc03922 *R/fast-REML.r +88dbdd15671f0a924bdd9f5f42a89a34 *R/gam.fit3.r +a6208b52c8af1f8779dd0b4b482c3fa7 *R/gam.fit4.r e63276b78a4ff2566af2e651bfc6aa9c *R/gam.sim.r 633c048e36083646c7d409e99cf7b037 *R/gamlss.r -28ddc8624513070a62d0cc2aba7776a2 *R/gamm.r -09240f99d77e54848bf15d540c267022 *R/jagam.r -70eb07e6f5c4d61f6519863b8eebdfc0 *R/mgcv.r -bf5f3e43220b1e6db57c80caf66f1e54 *R/misc.r +ff163969e9ad7a38857907bf38a39ec0 *R/gamm.r +3b0d5cac4a59ef1a8cb325b249699476 *R/jagam.r +d5ce48c9ea98c2f428cfb00bcc9fb5ea *R/mgcv.r +59a92561754c8156506af4004f594ac9 *R/misc.r 66c24aed2f8fc9f6bce321794f8aff87 *R/mvam.r f031621e351c06b12c328e4033e2f097 *R/plots.r -3e87a7ef880c8aa40ce57201f6ea5458 *R/smooth.r +966c267722f1027c53aa5621614333ae *R/smooth.r 68348617c5d3b0e2ea805c4360a8cdd4 *R/soap.r 76cc875719bf0ef9eab45ea5bfeccda6 *R/sparse.r e468195a83fab90da8e760c2c3884bd3 *data/columb.polys.rda @@ -32,8 +32,6 @@ c745969a1292eb3d49dfd0d0c2c997d4 *inst/po/de/LC_MESSAGES/mgcv.mo c1b1475e5fef49fe49929d2796ff87b6 *inst/po/ko/LC_MESSAGES/mgcv.mo cd7e6d1282796c089c320fbff388047f *inst/po/pl/LC_MESSAGES/R-mgcv.mo 715e52c0debf9848bbda15e94f5e7315 *inst/po/pl/LC_MESSAGES/mgcv.mo -6224353bb8556c6ae4106352e244a3ab *inst/po/po/LC_MESSAGES/R-mgcv.mo -4e7e8faae00111b7db61daab04358202 *inst/po/po/LC_MESSAGES/mgcv.mo c574fe1ca9d55a9818d308906f16d16e *man/Beta.Rd 5bf12ddc0dab9daae72271b96a15c539 *man/Predict.matrix.Rd c45c0f78f753461b33a295883461e732 *man/Predict.matrix.cr.smooth.Rd @@ -41,7 +39,7 @@ e9b0a2e31b130cf2cb38618ec50d1919 *man/Predict.matrix.soap.film.Rd f12468625253dd3603907de233762fd6 *man/Rrank.Rd f6cadda5999c35800fd65c08d6812f7b *man/Tweedie.Rd 80f8763baa4987579e2aa56073a9e94e *man/anova.gam.Rd -de9b8ca4369a4fc7ba69c68c1cdb98c2 *man/bam.Rd +e49e38bc1741829ae997f8b0b4fcd832 *man/bam.Rd 71bde8b8caa24a36529ce7e0ac3165d8 *man/bam.update.Rd a2beb811b1093c5e82ef32d7de1f7d32 *man/cSplineDes.Rd 9b4d616d1b6c4a46ca77d16cded3f806 *man/choose.k.Rd @@ -59,13 +57,13 @@ c6fd48d86959402982c75566876baa16 *man/formula.gam.Rd 6f405acde2d7b6f464cf45f5395113ba *man/full.score.Rd 86678ba1579e9deb8ac3337af60a9c20 *man/gam.Rd fe61dd0efab9e920c17335faf3d5764c *man/gam.check.Rd -a65bc22f606e45d185bc375fbf5698a1 *man/gam.control.Rd +ca99cf1e23fd828e07b49a3588d35d65 *man/gam.control.Rd 44db24b66ce63bc16d2c8bc3f5b42ac5 *man/gam.convergence.Rd 58ab3b3d6f4fd0d008d73c3c4e6d3305 *man/gam.fit.Rd dcf10ab3cc3102f7fb36d3ddf44013f5 *man/gam.fit3.Rd 8ba3991b5932b0775b452d20c9ff4d54 *man/gam.models.Rd e969287d1a5c281faa7eb6cfce31a7c5 *man/gam.outer.Rd -39e2d1fa5374f3b5cff1d761b73f0daa *man/gam.scale.Rd +c17814cea1b11e5ca374e72d6e1cbd98 *man/gam.scale.Rd 96676186808802344a99f9d3170bf775 *man/gam.selection.Rd 
956bf1a6ac1361dd0403c28153b03a9b *man/gam.side.Rd b2ff252537dd2155524b774b2435e66e *man/gam.vcomp.Rd @@ -78,9 +76,9 @@ df4a6db696749986fd5e20586fc9b718 *man/gaulss.Rd a2ea1233d43fac89e0cacbc09a8d31e2 *man/in.out.Rd 9c461959be1272edcb98ee7e20fdc317 *man/inSide.Rd 2f222eeeb3d7bc42f93869bf8c2af58a *man/influence.gam.Rd -bafea2eef12fdc819f8ac1fb41d8b914 *man/initial.sp.Rd +39b9de9dbac7d9dc5c849e1a37def675 *man/initial.sp.Rd c00bcfe2d0b44b2ea955f3934421807c *man/interpret.gam.Rd -469e313e7a1b0520593c8e7a938111b5 *man/jagam.Rd +b440c31b77c368b484d9665b2b3e89fb *man/jagam.Rd 07d2c259b9edf164f42935170b4fccd0 *man/ldTweedie.Rd 58e73ac26b93dc9d28bb27c8699e12cf *man/linear.functional.terms.Rd 93035193b0faa32700e1421ce8c1e9f6 *man/logLik.gam.Rd @@ -147,32 +145,31 @@ a16b3a5a4d13c705dcab8d1cd1b3347e *man/vcov.gam.Rd 281e73658c726997196727a99a4a1f9e *man/vis.gam.Rd 2c5f6815e609f2cdb56b0067a183f915 *man/ziP.Rd ae8388103d8b1be39f55f426b205b576 *man/ziplss.Rd -4550c3e06d76e6e8a9b22fb8bbd76eb3 *po/R-de.po +7bd0744ad8ea562d7a624e066ef3390c *po/R-de.po 0bdfcf98961b0d52b60f806dc1dba77e *po/R-en@quot.po 4e65e93fef4d034a399f90421e8f323a *po/R-fr.po 73cdaf7a5a69f0b7cbfe411cd0c468b6 *po/R-ko.po 7eb472ce4c2d1dc30b4dd1091c0e88df *po/R-mgcv.pot 7b07899266c3acf3d2a625850d7cd6ef *po/R-pl.po -5b91cecd0b9e52154185d380a273c623 *po/R-po.po -ccf3140169b68ec7aff4b15e6a97e5db *po/de.po +382c94188dbc193fca9628287b66d1af *po/de.po 93f72334356fe6f05a64e567efd35c8e *po/en@quot.po fb829b82760779929951d49fe29ed2e5 *po/fr.po dc1ef92ff4454734c3a24876e299b760 *po/ko.po 8ad4757e026d1841c8f43eb97072c06e *po/mgcv.pot dfd4eec9edc7d1ab6354d47b6e2bd42f *po/pl.po -b56dac4547037ea45e2c8f9bce7aa9ef *po/po.po 03972284b3400cf82cacd5d2dc4b8cb3 *src/Makevars e71c1a1624b431fbab0a4c8f151d2a97 *src/coxph.c -08e156711c686a1f1efe505d63fabef5 *src/gdi.c +48f0c9de0da60d48cf4b306c2fdd039a *src/discrete.c +e4236680abfb9c89c7bf09b441c755c2 *src/gdi.c 2436f9b328e80370ce2203dbf1dd813c *src/general.h -e5bf24371be5ea7f3ffb40060a648803 *src/init.c +890105488631271ad8b184aa0524b59f *src/init.c 9a5d7cb3cf93cbdfc08353fbd20a270e *src/magic.c -a9735df73ae117df1b4c1197f4748389 *src/mat.c +55b92c3ab6a037f9742542c2af8cad56 *src/mat.c aae0f298e384952bb8b6b923d40520a8 *src/matrix.c 6b781cbd5b9cfee68ad30bb7ce31ef3a *src/matrix.h ec2ae157a9e3bedcccc88b053d0a4e0c *src/mgcv.c -b62374abb9910e539a3280d66ac4193f *src/mgcv.h -00f8a024faef17f90ed04f01e736df71 *src/misc.c +39b22a8fa2128fd6b52b3601841b0393 *src/mgcv.h +6b90633f745d3b3314ec5a784bde99d0 *src/misc.c 08c1706ffeec4277c484435a0644b0e3 *src/mvn.c cbe54250deb38aa5f88f8b669a4468cd *src/qp.c cd563899be5b09897d1bf36a7889caa0 *src/qp.h diff --git a/NAMESPACE b/NAMESPACE index cfe2595..7532f8e 100755 --- a/NAMESPACE +++ b/NAMESPACE @@ -65,15 +65,29 @@ export(anova.gam, bam, bam.update, betar, cox.ph,concurvity, cSplineDes, t2,te,ti,tensor.prod.model.matrix,tensor.prod.penalties, Tweedie,tw,uniquecombs, vcov.gam, vis.gam, ziP, ziplss) -importFrom(grDevices,cm.colors,gray,heat.colors,terrain.colors,topo.colors) -importFrom(graphics,axis,box,contour,hist,lines,mtext, par, persp,plot,points, - polygon,strheight,strwidth,text) +importFrom(grDevices,cm.colors,dev.interactive,devAskNewPage,gray,grey,heat.colors,terrain.colors,topo.colors) +importFrom(graphics,abline,axis,axTicks,box,contour,hist,image,lines, + mtext, par, persp,plot,points, + polygon,rect,strheight,strwidth,text,title) + +importFrom(stats,.checkMFClasses,.getXlevels,anova,approx,as.formula, +binomial,coef,cooks.distance,cor,cov, 
+delete.response,dbeta,dgamma,dnorm,dpois,fitted,formula,gaussian,glm, +influence,logLik,lm,mad, +make.link,median,model.frame,model.offset,model.matrix,nlm, +na.pass,napredict,na.omit,naresid,optim,pchisq,pnorm,pt,pf, +power,predict,printCoefmat,quantile, +qbeta,qbinom,qchisq,qnbinom,qgamma,qnorm,qpois,qqline,qqnorm,qqplot, +reformulate,residuals, +rbeta,rbinom,rgamma,rnbinom,rnorm,rpois,runif,sd, +termplot,terms.formula,terms,uniroot,var,vcov,weights) + +importFrom(utils,object.size) -importFrom(stats,anova,influence,cooks.distance,logLik,vcov,residuals,predict,model.matrix) importFrom(nlme,Dim,corMatrix,logDet,pdConstruct,pdFactor,pdMatrix,getGroupsFormula,lme,varFixed,lmeControl) importMethodsFrom(Matrix,t,colMeans,colSums,chol,solve,lu,expand) importFrom(Matrix,Diagonal,sparseMatrix,Matrix) -importFrom(methods,cbind2) +importFrom(methods,cbind2,as) S3method(anova, gam) S3method(influence, gam) diff --git a/R/bam.r b/R/bam.r index 2d64263..1b738a9 100644 --- a/R/bam.r +++ b/R/bam.r @@ -1,5 +1,5 @@ ## routines for very large dataset generalized additive modelling. -## (c) Simon N. Wood 2009-2013 +## (c) Simon N. Wood 2009-2015 ls.size <- function(x) { @@ -13,19 +13,20 @@ ls.size <- function(x) { for (i in 1:n) sz[i] <- object.size(x[[i]]) names(sz) <- xn sz -} +} ## ls.size -rwMatrix <- function(stop,row,weight,X) { +rwMatrix <- function(stop,row,weight,X,trans=FALSE) { ## Routine to recombine the rows of a matrix X according to info in ## stop, row and weight. Consider the ith row of the output matrix ## ind <- 1:stop[i] if i==1 and ind <- (stop[i-1]+1):stop[i] ## otherwise. The ith output row is then X[row[ind],]*weight[ind] if (is.matrix(X)) { n <- nrow(X);p<-ncol(X);ok <- TRUE} else { n<- length(X);p<-1;ok<-FALSE} stop <- stop - 1;row <- row - 1 ## R indices -> C indices - oo <-.C(C_rwMatrix,as.integer(stop),as.integer(row),as.double(weight),X=as.double(X),as.integer(n),as.integer(p)) + oo <-.C(C_rwMatrix,as.integer(stop),as.integer(row),as.double(weight),X=as.double(X), + as.integer(n),as.integer(p),trans=as.integer(trans)) if (ok) return(matrix(oo$X,n,p)) else return(oo$X) -} +} ## rwMatrix chol2qr <- function(XX,Xy,nt=1) { ## takes X'X and X'y and returns R and f @@ -71,7 +72,7 @@ qr.update <- function(Xn,yn,R=NULL,f=rep(0,0),y.norm2=0,use.chol=FALSE,nt=1) rp <- qrx$pivot;rp[rp] <- 1:p # reverse pivot return(list(R = qr.R(qrx)[,rp],f=fn,y.norm2=y.norm2)) } -} +} ## qr.update qr.up <- function(arg) { @@ -104,6 +105,127 @@ qr.up <- function(arg) { qrx } ## qr.up +compress.df <- function(dat,m=NULL) { +## Takes dataframe in dat and compresses it by rounding and duplicate +## removal. For metric variables we first find the unique cases. +## If there are <= m of these then these are employed, otherwise +## rounding is used. Factors are always reduced to the number of +## levels present in the data. Idea is that this function is called +## with columns of dataframes corresponding to single smooths or marginals. 
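## ----------------------------------------------------------------------
## Illustration (not part of the patch): the rounding idea behind compress.df
## for a single metric covariate, in plain base R. The covariate is snapped to
## an m point grid, so only the grid values used and an index vector need be
## kept. Function and variable names here (round1, xg, k) are illustrative.
round1 <- function(x, m = 100) {
  xl <- range(x)
  xg <- seq(xl[1], xl[2], length = m)   ## m point grid spanning the data
  dx <- xg[2] - xg[1]
  k  <- round((x - xl[1])/dx) + 1       ## index of nearest grid point, 1..m
  xu <- unique(xg[k])                   ## discrete values actually used
  list(xu = xu, k = match(xg[k], xu))   ## x is approximated by xu[k]
}
set.seed(1)
x  <- runif(1e5)
cd <- round1(x)
length(cd$xu)                ## at most m distinct values remain
max(abs(x - cd$xu[cd$k]))    ## rounding error is at most half a grid spacing
## ----------------------------------------------------------------------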
+ d <- ncol(dat) + if (is.null(m)) m <- if (d==1) 1000 else if (d==2) 100 else 25 else + if (d>1) m <- round(m^{1/d}) + 1 + + mf <- mm <- 1 ## total grid points for factor and metric + for (i in 1:d) if (is.factor(dat[,i])) { + mf <- mf * length(unique(dat[,i])) + } else { + mm <- mm * m + } + xu <- uniquecombs(as.matrix(dat)) + if (nrow(xu)>mm*mf) { ## too many unique rows to use only unique + for (i in 1:d) if (!is.factor(dat[,i])) { ## round the metric variables + xl <- range(dat[,i]) + xu <- seq(xl[1],xl[2],length=m) + dx <- xu[2]-xu[1] + kx <- round((dat[,i]-xl[1])/dx)+1 + dat[,i] <- xu[kx] ## rounding the metric variables + } + xu <- uniquecombs(as.matrix(dat)) + } + k <- attr(xu,"index") + ## shuffle rows in order to avoid induced dependencies between discretized + ## covariates (which can mess up gam.side)... + seed <- try(get(".Random.seed",envir=.GlobalEnv),silent=TRUE) ## store RNG seed + if (inherits(seed,"try-error")) { + runif(1) + seed <- get(".Random.seed",envir=.GlobalEnv) + } + kind <- RNGkind(NULL) + RNGkind("default","default") + set.seed(1) ## ensure repeatability + + ii <- sample(1:nrow(xu),nrow(xu),replace=FALSE) ## shuffling index + + RNGkind(kind[1],kind[2]) + assign(".Random.seed",seed,envir=.GlobalEnv) ## RNG behaves as if it had not been used + + xu[ii,] <- xu ## shuffle rows of xu + k <- ii[k] ## correct k index accordingly + ## ... finished shuffle + xu <- as.data.frame(xu) + ## set names and levels to match dat... + names(xu) <- names(dat) + for (i in 1:d) if (is.factor(dat[,i])) { + xu[,i] <- as.factor(xu[,i]) + levels(xu[,i]) <- levels(dat[,i]) + } + k -> attr(xu,"index") + xu +} ## compress.df + +discrete.mf <- function(gp,mf,pmf,m=NULL) { +## discretize the covariates for the terms specified in smooth.spec +## id and factor by not allowed. pmf is a model frame for just the +## parametric terms --- mini.mf is applied to this. +## NOTE: matrix arguments not allowed but not caught yet. + mf0 <- list() + nk <- 0 ## count number of index vectors to avoid too much use of cbind + for (i in 1:length(gp$smooth.spec)) nk <- nk + + if (inherits(gp$smooth.spec[[i]],"tensor.smooth.spec")) length(gp$smooth.spec[[i]]$margin) else 1 + k <- matrix(0,nrow(mf),nk) + ik <- 0 ## index counter + nr <- rep(0,nk) ## number of rows for term + ## loop through the terms discretizing the covariates... + for (i in 1:length(gp$smooth.spec)) { + mi <- if (is.null(m)||length(m)==1) m else m[i] + if (inherits(gp$smooth.spec[[i]],"tensor.smooth.spec")) { ## tensor branch + for (j in 1:length(gp$smooth.spec[[i]]$margin)) { ## loop through margins + mfd <- compress.df(mf[gp$smooth.spec[[i]]$margin[[j]]$term],m=mi) + ik <- ik + 1 + k[,ik] <- attr(mfd,"index") + nr[ik] <- nrow(mfd) + mf0 <- c(mf0,mfd) + } + } else { ## not te or ti... + mfd <- compress.df(mf[gp$smooth.spec[[i]]$term],m=mi) + ik <- ik + 1 + k[,ik] <- attr(mfd,"index") + nr[ik] <- nrow(mfd) + mf0 <- c(mf0,mfd) + } + ## deal with any by variable... 
+ if (gp$smooth.spec[[i]]$by!="NA") { + stop("currently discrete methods do not handle by variables") + if (is.factor(mf[gp$smooth.spec[[i]]$by])) stop("discretization can not handle factor by variables") + if (!is.null(gp$smooth.spec[[i]]$id)) stop("discretization can not handle smooth ids") + mf0[[gp$smooth.spec[[i]]$by]] <- rep(1,nr[ik]) ## actual by variables are handled by discrete methods + } + } ## main term loop + ## pad mf0 so that all rows are the same length + ## padding is necessary if gam.setup is to be used for setup + maxr <- max(nr) + pmf0 <- mini.mf(pmf,maxr) + if (nrow(pmf0)>maxr) maxr <- nrow(pmf0) + mf0 <- c(mf0,pmf0) + for (i in 1:length(mf0)) { + me <- length(mf0[[i]]) + if (me < maxr) mf0[[i]][(me+1):maxr] <- sample(mf0[[i]],maxr-me,replace=TRUE) + } + ## add response so that gam.setup can do its thing... + mf0[[gp$response]] <- sample(mf[[gp$response]],maxr) + + ## mf0 is the discretized model frame (actually a list), padded to have equal length rows + ## k is the index vector for each sub-matrix, only the first nr rows of which are + ## to be retained... Use of check.names=FALSE ensures, e.g. 'offset(x)' not changed... + + ## now copy back into mf so terms unchanged + mf <- mf[1:maxr,] + for (na in names(mf0)) mf[[na]] <- mf0[[na]] + + list(mf=mf,k=k,nr=nr) +} ## discrete.mf + mini.mf <-function(mf,chunk.size) { ## takes a model frame and produces a representative subset of it, suitable for ## basis setup. @@ -124,7 +246,7 @@ mini.mf <-function(mf,chunk.size) { set.seed(66) ## randomly sample from original frame... ind <- sample(1:n,chunk.size) - mf0 <- mf[ind,] + mf0 <- mf[ind,,drop=FALSE] ## ... now need to ensure certain sorts of representativeness ## work through elements collecting the rows containing @@ -155,28 +277,248 @@ mini.mf <-function(mf,chunk.size) { RNGkind(kind[1], kind[2]) assign(".Random.seed", seed, envir = .GlobalEnv) -## problems with the following are... -## 1. you can produce model frame rows that are wholly un-representative of the -## data for multi dimensional smooths this way, by pairing extreme values -## with values of other variables that they never occur near. -## 2. Nothing is done to ensure that all factor levels are present. -# mf0 <- mf[ind,] ## random sample of rows - ## now need to ensure that max and min are in sample for each element of mf0 - ## note that min and max might occur twice, but this shouldn't matter (and - ## is better than min overwriting max, for example) -# for (j in 1:length(mf)) if (is.numeric(mf0[[j]])) { -# if (is.matrix(mf0[[j]])) { ## find row containing minimum -# j.min <- min((1:n)[as.logical(rowSums(mf[[j]]==min(mf[[j]])))]) -# j.max <- min((1:n)[as.logical(rowSums(mf[[j]]==max(mf[[j]])))]) -# mf0[[j]][1,] <- mf[[j]][j.min,] -# mf0[[j]][2,] <- mf[[j]][j.max,] -# } else { ## vector -# mf0[[j]][1] <- min(mf[[j]]) -# mf0[[j]][2] <- max(mf[[j]]) -# } -# } mf0 -} +} ## mini.mf + + +bgam.fitd <- function (G, mf, gp ,scale , coef=NULL,etastart = NULL, + mustart = NULL, offset = rep(0, nobs),rho=0, control = gam.control(), intercept = TRUE, + gc.level=0,nobs.extra=0,npt=1) { +## This is a version of bgam.fit1 designed for use with discretized covariates. +## Difference to bgam.fit1 is that XWX, XWy and Xbeta are computed in C +## code using compressed versions of X. Parallelization of XWX formation +## is performed at the C level using openMP. +## Alternative fitting iteration using Choleski only, including for REML. 
+## Basic idea is to take only one Newton step for parameters per iteration +## and to control the step length to ensure that at the end of the step we +## are not going uphill w.r.t. the REML criterion... + + y <- mf[[gp$response]] + weights <- G$w + conv <- FALSE + nobs <- nrow(mf) + offset <- G$offset + + if (rho!=0) { ## AR1 error model + + ld <- 1/sqrt(1-rho^2) ## leading diagonal of root inverse correlation + sd <- -rho*ld ## sub diagonal + N <- nobs + ## see rwMatrix() for how following are used... + ar.row <- c(1,rep(1:N,rep(2,N))[-c(1,2*N)]) ## index of rows to reweight + ar.weight <- c(1,rep(c(sd,ld),N-1)) ## row weights + ar.stop <- c(1,1:(N-1)*2+1) ## (stop[i-1]+1):stop[i] are the rows to reweight to get ith row + if (!is.null(mf$"(AR.start)")) { ## need to correct the start of new AR sections... + ii <- which(mf$"(AR.start)"==TRUE) + if (length(ii)>0) { + if (ii[1]==1) ii <- ii[-1] ## first observation does not need any correction + ar.weight[ii*2-2] <- 0 ## zero sub diagonal + ar.weight[ii*2-1] <- 1 ## set leading diagonal to 1 + } + } + } else {## AR setup complete + ar.row <- ar.weight <- ar.stop <- -1 ## signal no re-weighting + } + + family <- G$family + additive <- if (family$family=="gaussian"&&family$link=="identity") TRUE else FALSE + variance <- family$variance + dev.resids <- family$dev.resids + linkinv <- family$linkinv + mu.eta <- family$mu.eta + if (!is.function(variance) || !is.function(linkinv)) + stop("'family' argument seems not to be a valid family object") + valideta <- family$valideta + if (is.null(valideta)) + valideta <- function(eta) TRUE + validmu <- family$validmu + if (is.null(validmu)) + validmu <- function(mu) TRUE + if (is.null(mustart)) { + eval(family$initialize) + } + else { + mukeep <- mustart + eval(family$initialize) + mustart <- mukeep + } + + eta <- if (!is.null(etastart)) + etastart + else family$linkfun(mustart) + + mu <- linkinv(eta) + if (!(validmu(mu) && valideta(eta))) + stop("cannot find valid starting values: please specify some") + dev <- sum(dev.resids(y, mu, weights))*2 ## just to avoid converging at iter 1 + + conv <- FALSE + + G$coefficients <- rep(0,ncol(G$X)) + class(G) <- "gam" + + ## need to reset response and weights to post initialization values + ## in particular to deal with binomial properly... + G$y <- y + G$w <- weights + + Sl <- Sl.setup(G) ## setup block diagonal penalty object + rank <- 0 + for (b in 1:length(Sl)) rank <- rank + Sl[[b]]$rank + Mp <- ncol(G$X) - rank ## null space dimension + Nstep <- 0 + for (iter in 1L:control$maxit) { ## main fitting loop + devold <- dev + dev <- 0 + ## accumulate the QR decomposition of the weighted model matrix + if (iter==1||!additive) { + qrx <- list() + if (iter>1) { + ## form eta = X%*%beta + eta <- Xbd(G$Xd,coef,G$kd,G$ts,G$dt,G$v,G$qc,G$drop) + } + mu <- linkinv(eta) + mu.eta.val <- mu.eta(eta) + good <- mu.eta.val != 0 + mu.eta.val[!good] <- .1 ## irrelvant as weight is zero + z <- (eta - offset) + (G$y - mu)/mu.eta.val + w <- (G$w * mu.eta.val^2)/variance(mu) + dev <- sum(dev.resids(G$y,mu,G$w)) + + qrx$y.norm2 <- if (rho==0) sum(w*z^2) else ## AR mod needed + sum(rwMatrix(ar.stop,ar.row,ar.weight,sqrt(w)*z,trans=FALSE)^2) + + ## form X'WX efficiently... + qrx$R <- XWXd(G$Xd,w,G$kd,G$ts,G$dt,G$v,G$qc,npt,G$drop,ar.stop,ar.row,ar.weight) + ## form X'Wz efficiently... + qrx$f <- XWyd(G$Xd,w,z,G$kd,G$ts,G$dt,G$v,G$qc,G$drop,ar.stop,ar.row,ar.weight) + if(gc.level>1) gc() + + ## following reparameterizes X'X and f=X'y, according to initial reparameterizarion... 
+ qrx$XX <- Sl.initial.repara(Sl,qrx$R,inverse=FALSE,both.sides=TRUE,cov=FALSE,nt=npt) + qrx$Xy <- Sl.initial.repara(Sl,qrx$f,inverse=FALSE,both.sides=FALSE,cov=FALSE,nt=npt) + + G$n <- nobs + } else { ## end of if (iter==1||!additive) + dev <- qrx$y.norm2 - sum(coef*qrx$f) ## actually penalized deviance + } + + if (control$trace) + message(gettextf("Deviance = %s Iterations - %d", dev, iter, domain = "R-mgcv")) + + if (!is.finite(dev)) stop("Non-finite deviance") + + ## preparation for working model fit is ready, but need to test for convergence first + if (iter>2 && abs(dev - devold)/(0.1 + abs(dev)) < control$epsilon) { + conv <- TRUE + #coef <- start + break + } + + ## use fast REML code + ## block diagonal penalty object, Sl, set up before loop + + if (iter==1) { ## need to get initial smoothing parameters + lambda.0 <- initial.sp(qrx$R,G$S,G$off,XX=TRUE) ## note that this uses the untrasformed X'X in qrx$R + ## convert intial s.p.s to account for L + lsp0 <- log(lambda.0) ## initial s.p. + if (!is.null(G$L)) lsp0 <- as.numeric(coef(lm(lsp0 ~ G$L-1+offset(G$lsp0)))) + n.sp <- length(lsp0) + } + + ## carry forward scale estimate if possible... + if (scale>0) log.phi <- log(scale) else { + if (iter==1) { + if (is.null(coef)||qrx$y.norm2==0) lsp0[n.sp+1] <- log(var(as.numeric(G$y))*.05) else + lsp0[n.sp+1] <- log(qrx$y.norm2/(nobs+nobs.extra)) + } + } + + ## get beta, grad and proposed Newton step... + repeat { ## Take a Newton step to update log sp and phi + lsp <- lsp0 + Nstep + if (scale<=0) log.phi <- lsp[n.sp+1] + prop <- Sl.fitChol(Sl,qrx$XX,qrx$Xy,rho=lsp[1:n.sp],yy=qrx$y.norm2,L=G$L,rho0=G$lsp0,log.phi=log.phi, + phi.fixed=scale>0,nobs=nobs,Mp=Mp,nt=npt,tol=dev*.Machine$double.eps^.7) + if (max(Nstep)==0) { + Nstep <- prop$step;lsp0 <- lsp; + break + } else { + if (sum(prop$grad*Nstep)>dev*1e-7) Nstep <- Nstep/2 else { + Nstep <- prop$step;lsp0 <- lsp;break; + } + } + } ## end of sp update + + coef <- Sl.initial.repara(Sl,prop$beta,inverse=TRUE,both.sides=FALSE,cov=FALSE) + + if (any(!is.finite(coef))) { + conv <- FALSE + warning(gettextf("non-finite coefficients at iteration %d", + iter)) + break + } + } ## end fitting iteration + + if (!conv) + warning("algorithm did not converge") + + eps <- 10 * .Machine$double.eps + if (family$family == "binomial") { + if (any(mu > 1 - eps) || any(mu < eps)) + warning("fitted probabilities numerically 0 or 1 occurred") + } + if (family$family == "poisson") { + if (any(mu < eps)) + warning("fitted rates numerically 0 occurred") + } + Mp <- G$nsdf + if (length(G$smooth)>1) for (i in 1:length(G$smooth)) Mp <- Mp + G$smooth[[i]]$null.space.dim + scale <- exp(log.phi) + reml <- (dev/scale - prop$ldetS + prop$ldetXXS + (length(y)-Mp)*log(2*pi*scale))/2 + if (rho!=0) { ## correct REML score for AR1 transform + df <- if (is.null(mf$"(AR.start)")) 1 else sum(mf$"(AR.start)") + reml <- reml - (nobs-df)*log(ld) + } + + object <- list(db.drho=prop$db, + gcv.ubre=reml,mgcv.conv=conv,rank=prop$r, + scale.estimated = scale<=0,outer.info=NULL, + optimizer=c("perf","chol")) + object$coefficients <- coef + ## form linear predictor efficiently... 
+ object$linear.predictors <- Xbd(G$Xd,coef,G$kd,G$ts,G$dt,G$v,G$qc,G$drop) + G$offset + PP <- Sl.initial.repara(Sl,prop$PP,inverse=TRUE,both.sides=TRUE,cov=TRUE,nt=npt) + F <- pmmult(PP,qrx$R,FALSE,FALSE,nt=npt) ##crossprod(PP,qrx$R) - qrx$R contains X'WX in this case + object$edf <- diag(F) + object$edf1 <- 2*object$edf - rowSums(t(F)*F) + object$sp <- exp(lsp[1:n.sp]) + object$sig2 <- object$scale <- scale + object$Vp <- PP * scale + object$Ve <- pmmult(F,object$Vp,FALSE,FALSE,nt=npt) ## F%*%object$Vp + ## sp uncertainty correction... + M <- ncol(prop$db) + ev <- eigen(prop$hess,symmetric=TRUE) + ind <- ev$values <= 0 + ev$values[ind] <- 0;ev$values[!ind] <- 1/sqrt(ev$values[!ind]) + rV <- (ev$values*t(ev$vectors))[,1:M] + Vc <- pcrossprod(rV%*%t(prop$db),nt=npt) + Vc <- object$Vp + Vc ## Bayesian cov matrix with sp uncertainty + object$edf2 <- rowSums(Vc*qrx$R)/scale + object$Vc <- Vc + object$outer.info <- list(grad = prop$grad,hess=prop$hess) + + object$R <- pchol(qrx$R,npt) + piv <- attr(object$R,"pivot") + object$R[,piv] <- object$R + object$iter <- iter + object$wt <- w + object$y <- G$y + rm(G);if (gc.level>0) gc() + object +} ## end bgam.fitd + + bgam.fit <- function (G, mf, chunk.size, gp ,scale ,gamma,method, coef=NULL,etastart = NULL, mustart = NULL, offset = rep(0, nobs), control = gam.control(), intercept = TRUE, @@ -322,7 +664,7 @@ bgam.fit <- function (G, mf, chunk.size, gp ,scale ,gamma,method, coef=NULL,etas qrx <- chol2qr(qrx$R,qrx$f,nt=npt) qrx$y.norm2 <- y.norm2 } - } else { ## use new parallel accumulation + } else { ## use parallel accumulation for (i in 1:length(arg)) arg[[i]]$coef <- coef res <- parallel::parLapply(cl,arg,qr.up) ## single thread debugging version @@ -402,7 +744,7 @@ bgam.fit <- function (G, mf, chunk.size, gp ,scale ,gamma,method, coef=NULL,etas fit <- fast.REML.fit(um$Sl,um$X,qrx$f,rho=lsp0,L=G$L,rho.0=G$lsp0, log.phi=log.phi,phi.fixed=scale>0,rss.extra=rss.extra, nobs =nobs+nobs.extra,Mp=um$Mp,nt=npt) - res <- Sl.postproc(Sl,fit,um$undrop,qrx$R,cov=FALSE) + res <- Sl.postproc(Sl,fit,um$undrop,qrx$R,cov=FALSE,L=G$L,nt=npt) object <- list(coefficients=res$beta,db.drho=fit$d1b, gcv.ubre=fit$reml,mgcv.conv=list(iter=fit$iter, message=fit$conv),rank=ncol(um$X), @@ -466,7 +808,7 @@ bgam.fit <- function (G, mf, chunk.size, gp ,scale ,gamma,method, coef=NULL,etas } ## end fitting iteration if (method=="fREML") { ## do expensive cov matrix cal only at end - res <- Sl.postproc(Sl,fit,um$undrop,qrx$R,cov=TRUE,scale=scale) + res <- Sl.postproc(Sl,fit,um$undrop,qrx$R,cov=TRUE,scale=scale,L=G$L,nt=npt) object$edf <- res$edf object$edf1 <- res$edf1 object$edf2 <- res$edf2 @@ -589,7 +931,7 @@ bgam.fit2 <- function (G, mf, chunk.size, gp ,scale ,gamma,method, etastart = NU ## preparation for working model fit is ready, but need to test for convergence first if (iter>2 && abs(dev - devold)/(0.1 + abs(dev)) < control$epsilon) { conv <- TRUE - coef <- start + # coef <- start break } @@ -714,7 +1056,7 @@ ar.qr.up <- function(arg) { qrx$yX.last <- yX.last if (arg$gc.level>1) {rm(arg,w,y,ind);gc()} qrx -} +} ## ar.qr.up pabapr <- function(arg) { ## function for parallel calling of predict.gam @@ -804,6 +1146,7 @@ predict.bam <- function(object,newdata,type="link",se.fit=FALSE,terms=NULL, } } ## end predict.bam + bam.fit <- function(G,mf,chunk.size,gp,scale,gamma,method,rho=0, cl=NULL,gc.level=0,use.chol=FALSE,npt=1) ## function that does big additive model fit in strictly additive case @@ -954,7 +1297,7 @@ bam.fit <- 
function(G,mf,chunk.size,gp,scale,gamma,method,rho=0, G$y <- mf[[gp$response]] } else { ## n <= chunk.size - if (rho==0) qrx <- qr.update(sqrt(G$w)*G$X,sqrt(G$w)*G$y,use.chol=use.chol,nt=npt) else { + if (rho==0) qrx <- qr.update(sqrt(G$w)*G$X,sqrt(G$w)*(G$y-G$offset),use.chol=use.chol,nt=npt) else { row <- c(1,rep(1:n,rep(2,n))[-c(1,2*n)]) weight <- c(1,rep(c(sd,ld),n-1)) stop <- c(1,1:(n-1)*2+1) @@ -999,7 +1342,7 @@ bam.fit <- function(G,mf,chunk.size,gp,scale,gamma,method,rho=0, fit <- fast.REML.fit(um$Sl,um$X,qrx$f,rho=lsp0,L=G$L,rho.0=G$lsp0, log.phi=log.phi,phi.fixed=scale>0,rss.extra=rss.extra, nobs =n,Mp=um$Mp,nt=npt) - res <- Sl.postproc(Sl,fit,um$undrop,qrx$R,cov=TRUE,scale=scale) + res <- Sl.postproc(Sl,fit,um$undrop,qrx$R,cov=TRUE,scale=scale,L=G$L,nt=npt) object <- list(coefficients=res$beta,edf=res$edf,edf1=res$edf1,edf2=res$edf2,##F=res$F, db.drho=fit$d1b, gcv.ubre=fit$reml,hat=res$hat,mgcv.conv=list(iter=fit$iter, @@ -1109,13 +1452,31 @@ sparse.model.matrix <- function(G,mf,chunk.size) { gc() } X -} - +} # sparse.model.matrix + +tero <- function(sm) { +## te smooth spec re-order so that largest marginal is last. + maxd <- 0 + ns <- length(sm$margin) + for (i in 1:ns) if (sm$margin[[i]]$bs.dim>=maxd) { + maxi <- i;maxd <- sm$margin[[i]]$bs.dim + } + if (maxi1) { + gp$smooth.spec[[i]]$xt <- "tensor" + class(gp$smooth.spec[[i]]) <- c("re.smooth.spec","tensor.smooth.spec") + gp$smooth.spec[[i]]$margin <- list() + for (j in 1:gp$smooth.spec[[i]]$dim) gp$smooth.spec[[i]]$margin[[j]] <- list(term=gp$smooth.spec[[i]]$term[j]) + } + } + } cl <- match.call() # call needed in gam object for update to work mf <- match.call(expand.dots=FALSE) mf$formula <- gp$fake.formula mf$method <- mf$family<-mf$control<-mf$scale<-mf$knots<-mf$sp<-mf$min.sp <- mf$gc.level <- - mf$gamma <- mf$paraPen<- mf$chunk.size <- mf$rho <- mf$sparse <- mf$cluster <- + mf$gamma <- mf$paraPen<- mf$chunk.size <- mf$rho <- mf$sparse <- mf$cluster <- mf$discrete <- mf$use.chol <- mf$samfrac <- mf$nthreads <- mf$G <- mf$fit <- mf$...<-NULL mf$drop.unused.levels <- drop.unused.levels mf[[1]]<-as.name("model.frame") @@ -1160,9 +1550,8 @@ bam <- function(formula,family=gaussian(),data=list(),weights=NULL,subset=NULL,n pmf$formula <- gp$pf pmf <- eval(pmf, parent.frame()) # pmf contains all data for parametric part - pterms <- attr(pmf,"terms") ## pmf only used for this - rm(pmf); - + pterms <- attr(pmf,"terms") ## pmf only used for this and discretization, if selected. + if (gc.level>0) gc() mf <- eval(mf, parent.frame()) # the model frame now contains all the data @@ -1186,16 +1575,89 @@ bam <- function(formula,family=gaussian(),data=list(),weights=NULL,subset=NULL,n rm(dl); if (gc.level>0) gc() ## save space ## need mini.mf for basis setup, then accumulate full X, y, w and offset - mf0 <- mini.mf(mf,chunk.size) + if (discretize) { + ## discretize the data, creating list mf0 with discrete values + ## and indices giving the discretized value for each element of model frame. + ## 'discrete' can be null, or contain a discretization size, or + ## a discretization size per smooth term. 
+ dk <- discrete.mf(gp,mf,pmf,m=discrete) + mf0 <- dk$mf ## padded discretized model frame + sparse.cons <- 0 ## default constraints required for tensor terms + } else { + mf0 <- mini.mf(mf,chunk.size) + if (sparse) sparse.cons <- 2 else sparse.cons <- -1 + } + rm(pmf); ## no further use - if (sparse) sparse.cons <- 2 else sparse.cons <- -1 - G <- gam.setup(gp,pterms=pterms, data=mf0,knots=knots,sp=sp,min.sp=min.sp, H=NULL,absorb.cons=TRUE,sparse.cons=sparse.cons,select=FALSE, idLinksBases=TRUE,scale.penalty=control$scalePenalty, paraPen=paraPen) + if (discretize) { + v <- G$Xd <- list() + ## have to extract full parametric model matrix from pterms and mf + G$Xd[[1]] <- model.matrix(G$pterms,mf) + G$kd <- cbind(1:nrow(mf),dk$k) ## add index for parametric part to index list + ## create data object suitable for discrete data methods, from marginal model + ## matrices in G$smooth and G$X (stripping out padding, of course) + if (ncol(G$Xd[[1]])) { + kb <- k <- 2; qc <- dt <- ts <- rep(0,length(G$smooth)+1) + dt[1] <- ts[1] <- 1; + dk$nr <- c(NA,dk$nr) ## need array index to match elements of Xd + } else { + kb <- k <- 1; qc <- dt <- ts <- rep(0,length(G$smooth)) + } + drop <- rep(0,0) ## index of te related columns to drop + for (i in 1:length(G$smooth)) { + ts[kb] <- k + dt[kb] <- length(G$smooth[[i]]$margin) + if (inherits(G$smooth[[i]],"tensor.smooth")) { + if (inherits(G$smooth[[i]],"random.effect")&&!is.null(G$smooth[[i]]$rind)) { + ## terms re-ordered for efficiency, so the same has to be done on indices... + rind <- k:(k+dt[kb]-1) + dk$nr[rind] <- dk$nr[k+G$smooth[[i]]$rind-1] + G$kd[,rind] <- G$kd[,k+G$smooth[[i]]$rind-1] + } + + for (j in 1:dt[kb]) { + G$Xd[[k]] <- G$smooth[[i]]$margin[[j]]$X[1:dk$nr[k],,drop=FALSE] + k <- k + 1 + } + ## deal with any side constraints on tensor terms + di <- attr(G$smooth[[i]],"del.index") + if (!is.null(di)&&length(di>0)) { + di <- di + G$smooth[[i]]$first.para + length(drop) - 1 + drop <- c(drop,di) + } + ## deal with tensor smooth constraint + qrc <- attr(G$smooth[[i]],"qrc") + ## compute v such that Q = I-vv' and Q[,-1] is constraint null space basis + if (inherits(qrc,"qr")) { + v[[kb]] <- qrc$qr/sqrt(qrc$qraux);v[[kb]][1] <- sqrt(qrc$qraux) + qc[kb] <- 1 ## indicate a constraint + } else { + v[[kb]] <- rep(0,0) ## + if (!inherits(qrc,"character")||qrc!="no constraints") warning("unknown tensor constraint type") + } + } else { + v[[kb]] <- rep(0,0) + dt[kb] <- 1 + G$Xd[[k]] <- G$X[1:dk$nr[k],G$smooth[[i]]$first.para:G$smooth[[i]]$last.para] + k <- k + 1 + } + kb <- kb + 1 + } + if (length(drop>0)) G$drop <- drop + ## ... Xd is the list of discretized model matrices, or marginal model matrices + ## kd contains indexing vectors, so the ith model matrix or margin is Xd[[i]][kd[i,],] + ## ts[i] is the starting matrix in Xd for the ith model matrix, while dt[i] is the number + ## of elements of Xd that make it up (1 for a dingleton, more for a tensor). + ## v is list of Householder vectors encoding constraints and qc the constraint indicator. + G$v <- v;G$ts <- ts;G$dt <- dt;G$qc <- qc + } ## if (discretize) + G$sparse <- sparse ## no advantage to "fREML" with no free smooths... 
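## ----------------------------------------------------------------------
## Illustration (not part of the patch): the identity that makes the discrete
## methods cheap. If the full model matrix is X = Xd[k,] for a small matrix of
## unique rows Xd and an index vector k (the Xd/kd structures set up above),
## then X'WX needs only the weights accumulated over the unique rows:
##    X'WX = Xd' diag(wbar) Xd,  wbar[j] = sum of w[i] over rows with k[i]==j.
## XWXd/XWyd/Xbd do this in compiled code, across several marginals and tensor
## product terms; the sketch below is the single matrix case, with made-up
## names (Xd, k, w).
set.seed(2)
nd <- 20; n <- 1e5; p <- 4
Xd <- matrix(runif(nd*p), nd, p)                  ## unique design rows
k  <- c(1:nd, sample(1:nd, n - nd, replace=TRUE)) ## index: full X is Xd[k,]
w  <- runif(n)                                    ## working weights
wbar <- as.numeric(rowsum(w, k))                  ## weight totals per unique row
XtWX.fast <- t(Xd) %*% (wbar * Xd)                ## O(n + nd*p^2) flops
XtWX.full <- crossprod(Xd[k,], w * Xd[k,])        ## direct O(n*p^2) version
range(XtWX.fast - XtWX.full)                      ## agree to rounding error
## A model is fitted via this route using the argument this patch adds, e.g.
## (assuming the interface documented in the updated man/bam.Rd):
##   b <- bam(y ~ s(x0) + te(x1, x2), data = dat, method = "fREML",
##            discrete = TRUE, nthreads = 2)
## ----------------------------------------------------------------------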
@@ -1226,7 +1688,7 @@ bam <- function(formula,family=gaussian(),data=list(),weights=NULL,subset=NULL,n G$min.edf<-G$nsdf #-dim(G$C)[1] if (G$m) for (i in 1:G$m) G$min.edf<-G$min.edf+G$smooth[[i]]$null.space.dim - + G$discretize <- discretize G$formula<-formula ## environment(G$formula)<-environment(formula) environment(G$pterms) <- environment(G$terms) <- environment(G$pred.formula) <- @@ -1260,16 +1722,21 @@ bam <- function(formula,family=gaussian(),data=list(),weights=NULL,subset=NULL,n colnamesX <- colnames(G$X) if (G$sparse) { ## Form a sparse model matrix... + warning("sparse=TRUE is deprecated") if (sum(G$X==0)/prod(dim(G$X))<.5) warning("model matrix too dense for any possible benefit from sparse") if (nrow(mf)<=chunk.size) G$X <- as(G$X,"dgCMatrix") else G$X <- sparse.model.matrix(G,mf,chunk.size) if (rho!=0) warning("AR1 parameter rho unused with sparse fitting") object <- bgam.fit2(G, mf, chunk.size, gp ,scale ,gamma,method=method, control = control,npt=nthreads,...) - } else if (G$am) { + } else if (G$am&&!G$discretize) { if (nrow(mf)>chunk.size) G$X <- matrix(0,0,ncol(G$X)); if (gc.level>1) gc() object <- bam.fit(G,mf,chunk.size,gp,scale,gamma,method,rho=rho,cl=cluster, gc.level=gc.level,use.chol=use.chol,npt=nthreads) + } else if (G$discretize) { + object <- bgam.fitd(G, mf, gp ,scale ,nobs.extra=0,rho=rho, + control = control,npt=nthreads,gc.level=gc.level,...) + } else { G$X <- matrix(0,0,ncol(G$X)); if (gc.level>1) gc() if (rho!=0) warning("AR1 parameter rho unused with generalized model") @@ -1346,14 +1813,16 @@ bam <- function(formula,family=gaussian(),data=list(),weights=NULL,subset=NULL,n names(object$coefficients) <- G$term.names names(object$edf) <- G$term.names - rm(G);if (gc.level>0) gc() - ## note that predict.gam assumes that it must be ok not to split the ## model frame, if no new data supplied, so need to supply explicitly class(object) <- c("bam","gam","glm","lm") - object$linear.predictors <- as.numeric(predict.bam(object,newdata=object$model,block.size=chunk.size,cluster=cluster)) - object$fitted.values <- family$linkinv(object$linear.predictors) + if (!G$discretize) object$linear.predictors <- + as.numeric(predict.bam(object,newdata=object$model,block.size=chunk.size,cluster=cluster)) + rm(G);if (gc.level>0) gc() + + object$fitted.values <- family$linkinv(object$linear.predictors) + object$residuals <- sqrt(family$dev.resids(object$y,object$fitted.values,object$prior.weights)) * sign(object$y-object$fitted.values) object$deviance <- sum(object$residuals^2) @@ -1479,7 +1948,7 @@ bam.update <- function(b,data,chunk.size=10000) { log.phi=log.phi,phi.fixed = !b$scale.estimated,rss.extra=rss.extra, nobs =n,Mp=um$Mp,nt=1) if (b$scale.estimated) scale <- -1 else scale=b$sig2 - res <- Sl.postproc(b$Sl,fit,um$undrop,b$qrx$R,cov=TRUE,scale=scale) + res <- Sl.postproc(b$Sl,fit,um$undrop,b$qrx$R,cov=TRUE,scale=scale,L=b$g$L) object <- list(coefficients=res$beta,edf=res$edf,edf1=res$edf1,edf2=res$edf2,##F=res$F, diff --git a/R/efam.r b/R/efam.r index 7ca9646..7e978da 100644 --- a/R/efam.r +++ b/R/efam.r @@ -96,11 +96,11 @@ ocat <- function(theta=NULL,link="identity",R=NULL) { validmu <- function(mu) all(is.finite(mu)) dev.resids <- function(y, mu, wt,theta=NULL) { - F <- function(x) { - h <- ind <- x > 0; h[ind] <- 1/(exp(-x[ind]) + 1) - x <- exp(x[!ind]); h[!ind] <- (x/(1+x)) - h - } + #F <- function(x) { + # h <- ind <- x > 0; h[ind] <- 1/(exp(-x[ind]) + 1) + # x <- exp(x[!ind]); h[!ind] <- (x/(1+x)) + # h + #} Fdiff <- function(a,b) { ## cancellation resistent 
F(b)-F(a), b>a h <- rep(1,length(b)); h[b>0] <- -1; eb <- exp(b*h) @@ -144,11 +144,11 @@ ocat <- function(theta=NULL,link="identity",R=NULL) { Dd <- function(y, mu, theta, wt=NULL, level=0) { ## derivatives of the deviance... - F <- function(x) { ## e^(x)/(1+e^x) without overflow - h <- ind <- x > 0; h[ind] <- 1/(exp(-x[ind]) + 1) - x <- exp(x[!ind]); h[!ind] <- (x/(1+x)) - h - } + # F <- function(x) { ## e^(x)/(1+e^x) without overflow + # h <- ind <- x > 0; h[ind] <- 1/(exp(-x[ind]) + 1) + # x <- exp(x[!ind]); h[!ind] <- (x/(1+x)) + # h + # } Fdiff <- function(a,b) { ## cancellation resistent F(b)-F(a), b>a h <- rep(1,length(b)); h[b>0] <- -1; eb <- exp(b*h) @@ -958,7 +958,7 @@ betar <- function (theta = NULL, link = "logit",eps=.Machine$double.eps*100) { ## derivatives of the -2*loglik... ## ltheta <- theta theta <- exp(theta) - onemu <- 1 - mu; oney <- 1 - y + onemu <- 1 - mu; ## oney <- 1 - y muth <- mu*theta; ## yth <- y*theta onemuth <- onemu*theta ## (1-mu)*theta psi0.th <- digamma(theta) @@ -1595,18 +1595,18 @@ ziP <- function (theta = NULL, link = "identity") { }) - fv <- function(lp,theta=NULL) { - ## optional function to give fitted values... - if (is.null(theta)) theta <- get(".Theta") - th1 <- theta[1]; th2 <- exp(theta[2]); - eta <- th1 + th2*lp - p <- 1 - exp(-exp(eta)) - fv <- lambda <- exp(lp) - ind <- lp < log(.Machine$double.eps)/2 - fv[!ind] <- p[!ind] * lambda[!ind]/(1-exp(-lambda[!ind])) - fv[ind] <- p[ind] - fv - } ## fv +# fv <- function(lp,theta=NULL) { +# ## optional function to give fitted values... +# if (is.null(theta)) theta <- get(".Theta") +# th1 <- theta[1]; th2 <- exp(theta[2]); +# eta <- th1 + th2*lp +# p <- 1 - exp(-exp(eta)) +# fv <- lambda <- exp(lp) +# ind <- lp < log(.Machine$double.eps)/2 +# fv[!ind] <- p[!ind] * lambda[!ind]/(1-exp(-lambda[!ind])) +# fv[ind] <- p[ind] +# fv +# } ## fv rd <- function(mu,wt,scale) { ## simulate data given fitted latent variable in mu diff --git a/R/fast-REML.r b/R/fast-REML.r index 0df0b56..ac50ab8 100644 --- a/R/fast-REML.r +++ b/R/fast-REML.r @@ -166,6 +166,7 @@ Sl.setup <- function(G) { ind <- 1:Sl[[b]]$rank for (j in 1:length(Sl[[b]]$S)) { ## project penalties into range space of total penalty Sl[[b]]$S[[j]] <- t(U[,ind])%*%Sl[[b]]$S[[j]]%*%U[,ind] + Sl[[b]]$S[[j]] <- (t(Sl[[b]]$S[[j]]) + Sl[[b]]$S[[j]])/2 ## avoid over-zealous chol sym check Sl[[b]]$rS[[j]] <- mroot(Sl[[b]]$S[[j]],Sl[[b]]$rank) } Sl[[b]]$ind <- rep(FALSE,ncol(U)) @@ -180,6 +181,7 @@ Sl.setup <- function(G) { St <- St + Sl[[b]]$S[[j]]/S.norm lambda <- c(lambda,1/S.norm) } + St <- (t(St) + St)/2 ## avoid over-zealous chol sym check St <- t(mroot(St,Sl[[b]]$rank)) indc <- Sl[[b]]$start:(Sl[[b]]$start+ncol(St)-1) indr <- Sl[[b]]$start:(Sl[[b]]$start+nrow(St)-1) @@ -191,7 +193,7 @@ Sl.setup <- function(G) { Sl ## the penalty list } ## end of Sl.setup -Sl.initial.repara <- function(Sl,X,inverse=FALSE,both.sides=TRUE,cov=TRUE) { +Sl.initial.repara <- function(Sl,X,inverse=FALSE,both.sides=TRUE,cov=TRUE,nt=1) { ## Routine to apply initial Sl re-parameterization to model matrix X, ## or, if inverse==TRUE, to apply inverse re-para to parameter vector ## or cov matrix. 
if inverse is TRUE and both.sides=FALSE then @@ -203,8 +205,10 @@ Sl.initial.repara <- function(Sl,X,inverse=FALSE,both.sides=TRUE,cov=TRUE) { for (b in 1:length(Sl)) { ind <- Sl[[b]]$start:Sl[[b]]$stop if (is.matrix(Sl[[b]]$D)) { - if (both.sides) X[ind,] <- Sl[[b]]$D%*%X[ind,,drop=FALSE] - X[,ind] <- X[,ind,drop=FALSE]%*%t(Sl[[b]]$D) + if (both.sides) X[ind,] <- if (nt==1) Sl[[b]]$D%*%X[ind,,drop=FALSE] else + pmmult(Sl[[b]]$D,X[ind,,drop=FALSE],FALSE,FALSE,nt=nt) + X[,ind] <- if (nt==1) X[,ind,drop=FALSE]%*%t(Sl[[b]]$D) else + pmmult(X[,ind,drop=FALSE],Sl[[b]]$D,FALSE,TRUE,nt=nt) } else { ## Diagonal D X[,ind] <- t(Sl[[b]]$D * t(X[,ind,drop=FALSE])) if (both.sides) X[ind,] <- Sl[[b]]$D * X[ind,,drop=FALSE] @@ -215,8 +219,10 @@ Sl.initial.repara <- function(Sl,X,inverse=FALSE,both.sides=TRUE,cov=TRUE) { ind <- Sl[[b]]$start:Sl[[b]]$stop if (is.matrix(Sl[[b]]$D)) { Di <- if(is.null(Sl[[b]]$Di)) t(Sl[[b]]$D) else Sl[[b]]$Di - if (both.sides) X[ind,] <- t(Di)%*%X[ind,,drop=FALSE] - X[,ind] <- X[,ind,drop=FALSE]%*%Di + if (both.sides) X[ind,] <- if (nt==1) t(Di)%*%X[ind,,drop=FALSE] else + pmmult(Di,X[ind,,drop=FALSE],TRUE,FALSE,nt=nt) + X[,ind] <- if (nt==1) X[,ind,drop=FALSE]%*%Di else + pmmult(X[,ind,drop=FALSE],Di,FALSE,FALSE,nt=nt) } else { ## Diagonal D Di <- 1/Sl[[b]]$D X[,ind] <- t(Di * t(X[,ind,drop=FALSE])) @@ -233,15 +239,72 @@ Sl.initial.repara <- function(Sl,X,inverse=FALSE,both.sides=TRUE,cov=TRUE) { } } else for (b in 1:length(Sl)) { ## model matrix re-para ind <- Sl[[b]]$start:Sl[[b]]$stop - if (is.matrix(Sl[[b]]$D)) X[,ind] <- X[,ind,drop=FALSE]%*%Sl[[b]]$D else - X[,ind] <- t(Sl[[b]]$D*t(X[,ind,drop=FALSE])) ## X[,ind]%*%diag(Sl[[b]]$D) + if (is.matrix(X)) { + if (is.matrix(Sl[[b]]$D)) { + if (both.sides) X[ind,] <- if (nt==1) t(Sl[[b]]$D)%*%X[ind,,drop=FALSE] else + pmmult(Sl[[b]]$D,X[ind,,drop=FALSE],TRUE,FALSE,nt=nt) + X[,ind] <- if (nt==1) X[,ind,drop=FALSE]%*%Sl[[b]]$D else + pmmult(X[,ind,drop=FALSE],Sl[[b]]$D,FALSE,FALSE,nt=nt) + } else { + if (both.sides) X[ind,] <- Sl[[b]]$D * X[ind,,drop=FALSE] + X[,ind] <- t(Sl[[b]]$D*t(X[,ind,drop=FALSE])) ## X[,ind]%*%diag(Sl[[b]]$D) + } + } else { + if (is.matrix(Sl[[b]]$D)) X[ind] <- t(Sl[[b]]$D)%*%X[ind] else + X[ind] <- Sl[[b]]$D*X[ind] + } } X } ## end Sl.initial.repara -ldetS <- function(Sl,rho,fixed,np,root=FALSE) { + +ldetSblock <- function(rS,rho,deriv=2,root=FALSE,nt=1) { +## finds derivatives wrt rho of log|S| where +## S = sum_i tcrossprod(rS[[i]]*exp(rho[i])) +## when S is full rank +ve def and no +## reparameterization is required.... 
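## ----------------------------------------------------------------------
## Illustration (not part of the patch): the derivative ldetSblock is after.
## With S(rho) = sum_i exp(rho[i]) * tcrossprod(rS[[i]]) positive definite,
##    d log|S| / d rho[i] = exp(rho[i]) * tr( S^{-1} S_i ),  S_i = tcrossprod(rS[[i]]).
## A small finite difference check on toy rS and rho (names illustrative):
set.seed(3)
q <- 5
rS <- list(matrix(rnorm(q*q), q, q), matrix(rnorm(q*q), q, q))
rho <- c(.3, -.7)
Sfun <- function(r) exp(r[1])*tcrossprod(rS[[1]]) + exp(r[2])*tcrossprod(rS[[2]])
S <- Sfun(rho)
exact <- sapply(1:2, function(i) exp(rho[i]) * sum(diag(solve(S, tcrossprod(rS[[i]])))))
eps <- 1e-6
fd <- sapply(1:2, function(i) { r1 <- rho; r1[i] <- r1[i] + eps
  (as.numeric(determinant(Sfun(r1))$modulus) - as.numeric(determinant(S)$modulus))/eps })
rbind(exact, fd)   ## rows agree to finite difference accuracy
## ----------------------------------------------------------------------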
+ lam <- exp(rho) + S <- pcrossprod(rS[[1]],trans=TRUE,nt=nt)*lam[1] + ##tcrossprod(rS[[1]])*lam[1] ## parallel + p <- ncol(S) + m <- length(rS) + if (m > 1) for (i in 2:m) S <- S + pcrossprod(rS[[i]],trans=TRUE,nt=nt)*lam[i] + ## S <- S + tcrossprod(rS[[i]])*lam[i] ## parallel + if (!root) E <- S + d <- diag(S);d[d<=0] <- 1;d <- sqrt(d) + S <- t(S/d)/d ## diagonally pre-condition + R <- if (nt>1) pchol(S,nt) else suppressWarnings(chol(S,pivot=TRUE)) + piv <- attr(R,"pivot") + r <- attr(R,"rank") + if (r1) pchol(t(XXp/d)/d,nt) else suppressWarnings(chol(t(XXp/d)/d,pivot=TRUE)) + r <- Rrank(R);p <- ncol(XXp) + piv <- attr(R,"pivot") #;rp[rp] <- 1:p + if (r tol)|(abs(diag(reml2))>tol) + hess <- reml2 + grad <- reml1 + if (sum(uconv.ind)!=ncol(reml2)) { + reml1 <- reml1[uconv.ind] + reml2 <- reml2[uconv.ind,uconv.ind] + } + + er <- eigen(reml2,symmetric=TRUE) + er$values <- abs(er$values) + me <- max(er$values)*.Machine$double.eps^.5 + er$values[er$values4) step <- 4*step/ms + + ## return the coefficient estimate, the reml grad and the Newton step... + list(beta=beta,grad=grad,step=step,db=dift$d1b,PP=PP,R=R,piv=piv,rank=r, + hess=hess,ldetS=ldS$ldetS,ldetXXS=ldetXXS) +} ## Sl.fitChol + Sl.fit <- function(Sl,X,y,rho,fixed,log.phi=0,phi.fixed=TRUE,rss.extra=0,nobs=NULL,Mp=0,nt=1) { ## fits penalized regression model with model matrix X and ## initialised block diagonal penalty Sl to data in y, given @@ -508,7 +742,7 @@ Sl.fit <- function(Sl,X,y,rho,fixed,log.phi=0,phi.fixed=TRUE,rss.extra=0,nobs=NU phi <- exp(log.phi) if (is.null(nobs)) nobs <- n ## get log|S|_+ stably... - ldS <- ldetS(Sl,rho,fixed,np,root=TRUE) + ldS <- ldetS(Sl,rho,fixed,np,root=TRUE,nt=nt) ## apply resulting stable re-parameterization to X... X <- Sl.repara(ldS$rp,X) ## get pivoted QR decomp of augmented model matrix (in parallel if nt>1) @@ -532,14 +766,16 @@ Sl.fit <- function(Sl,X,y,rho,fixed,log.phi=0,phi.fixed=TRUE,rss.extra=0,nobs=NU ## its derivatives.... reml <- (rss.bSb/phi + (nobs-Mp)*log(2*pi*phi) + ldetXXS - ldS$ldetS)/2 - reml1 <- ((dift$rss1[!fixed] + dift$bSb1[!fixed])/phi + - dXXS$d1[!fixed] - ldS$ldet1)/2 - reml2 <- ((dift$rss2[!fixed,!fixed] + dift$bSb2[!fixed,!fixed])/phi + - dXXS$d2[!fixed,!fixed] - ldS$ldet2)/2 + reml1 <- (dXXS$d1[!fixed] - ldS$ldet1 + # dift$bSb1[!fixed]/phi)/2 + (dift$rss1[!fixed] + dift$bSb1[!fixed])/phi)/2 + + reml2 <- (dXXS$d2[!fixed,!fixed] - ldS$ldet2 + #dift$bSb2[!fixed,!fixed]/phi)/2 + (dift$rss2[!fixed,!fixed] + dift$bSb2[!fixed,!fixed])/phi)/2 ## finally add in derivatives w.r.t. log.phi if (!phi.fixed) { n <- length(reml1) reml1[n+1] <- (-rss.bSb/phi + nobs - Mp)/2 + #d <- c(-(dift$bSb1[!fixed]),rss.bSb)/(2*phi) d <- c(-(dift$rss1[!fixed] + dift$bSb1[!fixed]),rss.bSb)/(2*phi) reml2 <- rbind(cbind(reml2,d[1:n]),d) } @@ -562,7 +798,7 @@ fast.REML.fit <- function(Sl,X,y,rho,L=NULL,rho.0=NULL,log.phi=0,phi.fixed=TRUE, ## structurally un-identifiable coefficients. ## Note that lower bounds on smoothing parameters are not handled. 
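## ----------------------------------------------------------------------
## Illustration (not part of the patch): the safeguarded Newton step used for
## the log smoothing parameters. Hessian eigenvalues are replaced by their
## absolute values (floored away from zero) so the step is a descent direction
## even where the criterion is not locally convex, and over-long steps are
## truncated. Toy objective and names (newton.step, g, H) are illustrative.
newton.step <- function(g, H, maxstep = 4) {
  eh <- eigen(H, symmetric = TRUE)
  ev <- pmax(abs(eh$values), max(abs(eh$values)) * .Machine$double.eps^.5)
  step <- -drop(eh$vectors %*% ((t(eh$vectors) %*% g)/ev))
  ms <- max(abs(step))
  if (ms > maxstep) step <- maxstep * step/ms    ## limit the step length
  step
}
## One step on f(x) = sum(x^4) - sum(x^2), from a point where H is not
## positive definite:
x <- c(.1, .2)
g <- 4*x^3 - 2*x              ## gradient of f
H <- diag(12*x^2 - 2)         ## Hessian of f (negative definite here)
x + newton.step(g, H)         ## moves downhill, where a raw Newton step would not
## ----------------------------------------------------------------------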
maxNstep <- 5 - + if (is.null(nobs)) nobs <- nrow(X) np <- ncol(X) if (nrow(X) > np) { ## might as well do an initial QR step @@ -637,7 +873,7 @@ fast.REML.fit <- function(Sl,X,y,rho,L=NULL,rho.0=NULL,log.phi=0,phi.fixed=TRUE, rho1 <- L%*%(rho + step)+rho.0; if (!phi.fixed) log.phi <- rho1[nr+1] trial <- Sl.fit(Sl,X,y,rho1[1:nr],fixed,log.phi,phi.fixed,rss.extra,nobs,Mp,nt=nt) } - if (k==35 && trial$reml>best$reml) { ## step has failed + if ((k==35 && trial$reml>best$reml)||(sum(rho != rho + step)==0)) { ## step has failed step.failed <- TRUE break ## can get no further } @@ -756,7 +992,7 @@ Sl.Xprep <- function(Sl,X,nt=1) { ## this routine applies preliminary Sl transformations to X ## tests for structural identifibility problems and drops ## un-identifiable parameters. - X <- Sl.initial.repara(Sl,X) ## apply re-para used in Sl to X + X <- Sl.initial.repara(Sl,X,inverse=FALSE,both.sides=FALSE,cov=FALSE,nt=nt) ## apply re-para used in Sl to X id <- ident.test(X,attr(Sl,"E"),nt=nt) ## deal with structural identifiability ## id contains drop, undrop, lambda if (length(id$drop)>0) { ## then there is something to do here @@ -770,7 +1006,7 @@ Sl.Xprep <- function(Sl,X,nt=1) { } ## end Sl.Xprep -Sl.postproc <- function(Sl,fit,undrop,X0,cov=FALSE,scale = -1) { +Sl.postproc <- function(Sl,fit,undrop,X0,cov=FALSE,scale = -1,L,nt=nt) { ## reverse the various fitting re-parameterizations. ## X0 is the orginal model matrix before any re-parameterization ## or parameter dropping. Sl is also the original *before parameter @@ -778,16 +1014,17 @@ Sl.postproc <- function(Sl,fit,undrop,X0,cov=FALSE,scale = -1) { np <- ncol(X0) beta <- rep(0,np) beta[undrop] <- Sl.repara(fit$rp,fit$beta,inverse=TRUE) - beta <- Sl.initial.repara(Sl,beta,inverse=TRUE) + beta <- Sl.initial.repara(Sl,beta,inverse=TRUE,both.sides=TRUE,cov=TRUE,nt=nt) if (cov) { d1b <- matrix(0,np,ncol(fit$d1b)) ## following construction a bit ugly due to Sl.repara assumptions... d1b[undrop,] <- t(Sl.repara(fit$rp,t(fit$d1b),inverse=TRUE,both.sides=FALSE)) - for (i in 1:ncol(d1b)) d1b[,i] <- Sl.initial.repara(Sl,as.numeric(d1b[,i]),inverse=TRUE) ## d beta / d rho matrix + for (i in 1:ncol(d1b)) d1b[,i] <- + Sl.initial.repara(Sl,as.numeric(d1b[,i]),inverse=TRUE,both.sides=TRUE,cov=TRUE,nt=nt) ## d beta / d rho matrix PP <- matrix(0,np,np) PP[undrop,undrop] <- Sl.repara(fit$rp,fit$PP,inverse=TRUE) - PP <- Sl.initial.repara(Sl,PP,inverse=TRUE) + PP <- Sl.initial.repara(Sl,PP,inverse=TRUE,both.sides=TRUE,cov=TRUE,nt=nt) #XPP <- crossprod(t(X0),PP)*X0 #hat <- rowSums(XPP);edf <- colSums(XPP) XPP <- crossprod(t(X0),PP) @@ -796,9 +1033,13 @@ Sl.postproc <- function(Sl,fit,undrop,X0,cov=FALSE,scale = -1) { edf <- diag(F) edf1 <- 2*edf - rowSums(t(F)*F) ## edf <- rowSums(PP*crossprod(X0)) ## diag(PP%*%(t(X0)%*%X0)) - if (scale<=0) scale <- fit$rss/(fit$nobs - sum(edf)) + if (scale<=0) { + scale <- fit$rss/(fit$nobs - sum(edf)) + } Vp <- PP * scale ## cov matrix ## sp uncertainty correction... + ## BUG: possibility of L ignored here. 
+ if (!is.null(L)) d1b <- d1b%*%L M <- ncol(d1b) ev <- eigen(fit$outer.info$hess,symmetric=TRUE) ind <- ev$values <= 0 diff --git a/R/gam.fit3.r b/R/gam.fit3.r index 8c6efaa..9f7b19c 100755 --- a/R/gam.fit3.r +++ b/R/gam.fit3.r @@ -70,12 +70,28 @@ get.Eb <- function(rS,rank) { q <- nrow(rS[[1]]) S <- matrix(0,q,q) for (i in 1:length(rS)) { - Si <- rS[[i]]%*%t(rS[[i]]) + Si <- tcrossprod(rS[[i]]) ## rS[[i]]%*%t(rS[[i]]) S <- S + Si/sqrt(sum(Si^2)) } t(mroot(S,rank=rank)) ## E such that E'E = S } ## get.Eb +huberp <- function(wp,dof,k=1.5,tol=.Machine$double.eps^.5) { +## function to obtain huber estimate of scale from Pearson residuals, simplified +## from 'hubers' from MASS package + s0 <- mad(wp) ## initial scale estimate + th <- 2*pnorm(k) - 1 + beta <- th + k^2 * (1 - th) - 2 * k * dnorm(k) + for (i in 1:50) { + r <- pmin(pmax(wp,-k*s0),k*s0) + ss <- sum(r^2)/dof + s1 <- sqrt(ss/beta) + if (abs(s1-s0) 0 - kd[ind] <- wd[ind]*median(wp[ind]/wd[ind]) - ind <- wd < 0 - kd[ind] <- wd[ind]*median(wp[ind]/wd[ind]) - robust <- (sum(kd^2)+extra)/dof - ## force estimate to lie between deviance and pearson estimators - if (pearson > deviance) { - if (robust < deviance) robust <- deviance - if (robust > pearson) robust <- pearson - } else { - if (robust > deviance) robust <- deviance - if (robust < pearson) robust <- pearson + if (extra==0) robust <- huberp(wp,dof) else { + ## now scale deviance residuals to have magnitude similar + ## to pearson and compute new estimator. + kd <- wd + ind <- wd > 0 + kd[ind] <- wd[ind]*median(wp[ind]/wd[ind]) + ind <- wd < 0 + kd[ind] <- wd[ind]*median(wp[ind]/wd[ind]) + robust <- (sum(kd^2)+extra)/dof + ## force estimate to lie between deviance and pearson estimators + if (pearson > deviance) { + if (robust < deviance) robust <- deviance + if (robust > pearson) robust <- pearson + } else { + if (robust > deviance) robust <- deviance + if (robust < pearson) robust <- pearson + } } list(pearson=pearson,deviance=deviance,robust=robust) } @@ -592,6 +610,7 @@ gam.fit3 <- function (x, y, sp, Eb,UrS=list(), p.weights=as.double(weg),g1=as.double(g1),g2=as.double(g2), g3=as.double(g3),g4=as.double(g4),V0=as.double(V),V1=as.double(V1), V2=as.double(V2),V3=as.double(V3),beta=as.double(coef),b1=as.double(rep(0,nSp*ncol(x))), + w1=as.double(rep(0,nSp*length(z))), D1=as.double(D1),D2=as.double(D2),P=as.double(dum),P1=as.double(P1),P2=as.double(P2), trA=as.double(dum),trA1=as.double(trA1),trA2=as.double(trA2), rV=as.double(rV),rank.tol=as.double(rank.tol), @@ -607,6 +626,7 @@ gam.fit3 <- function (x, y, sp, Eb,UrS=list(), ## get dbeta/drho, directly in original parameterization db.drho <- if (deriv) T%*%matrix(oo$b1,ncol(x),nSp) else NULL + dw.drho <- if (deriv) matrix(oo$w1,length(z),nSp) else NULL rV <- matrix(oo$rV,ncol(x),ncol(x)) ## rV%*%t(rV)*scale gives covariance matrix @@ -627,21 +647,27 @@ gam.fit3 <- function (x, y, sp, Eb,UrS=list(), mu <- linkinv(eta) } trA <- oo$trA; - - wpr <- (y-mu) *sqrt(weights/family$variance(mu)) ## weighted pearson residuals - se <- gam.scale(wpr,wdr,n.true-trA,dev.extra) ## get scale estimates - pearson.warning <- NULL - if (control$scale.est=="pearson") { - scale.est <- se$pearson - if (scale.est > 4 * se$robust) pearson.warning <- TRUE - } else scale.est <- if (control$scale.est=="deviance") se$deviance else se$robust - - #pearson <- sum(weights*(y-mu)^2/family$variance(mu)) ## Pearson statistic - - #scale.est <- (pearson+dev.extra)/(n.true-trA) + +# wpr <- (y-mu) *sqrt(weights/family$variance(mu)) ## weighted pearson residuals +# se <- 
gam.scale(wpr,wdr,n.true-trA,dev.extra) ## get scale estimates +# pearson.warning <- NULL +# if (control$scale.est=="pearson") { +# scale.est <- se$pearson +# if (scale.est > 4 * se$robust) pearson.warning <- TRUE +# } else scale.est <- if (control$scale.est=="deviance") se$deviance else se$robust + + if (control$scale.est%in%c("pearson","fletcher","Pearson","Fletcher")) { + pearson <- sum(weights*(y-mu)^2/family$variance(mu)) + scale.est <- (pearson+dev.extra)/(n.true-trA) + if (control$scale.est%in%c("fletcher","Fletcher")) { ## Apply Fletcher (2012) correction + s.bar = mean(family$dvar(mu)*(y-mu)*sqrt(weights)/family$variance(mu)) + if (is.finite(s.bar)) scale.est <- scale.est/(1+s.bar) + } + } else { ## use the deviance estimator + scale.est <- (dev+dev.extra)/(n.true-trA) + } - #scale.est <- (dev+dev.extra)/(n.true-trA) - reml.scale <- NA + reml.scale <- NA if (scoreType%in%c("REML","ML")) { ## use Laplace (RE)ML @@ -773,12 +799,21 @@ gam.fit3 <- function (x, y, sp, Eb,UrS=list(), names(residuals) <- ynames names(mu) <- ynames names(eta) <- ynames - wt <- rep.int(0, nobs) - if (fisher) wt[good] <- w else wt[good] <- wf ## note that Fisher weights are returned + ww <- wt <- rep.int(0, nobs) + if (fisher) { wt[good] <- w; ww <- wt} else { + wt[good] <- wf ## note that Fisher weights are returned + ww[good] <- w + } names(wt) <- ynames names(weights) <- ynames names(y) <- ynames - + if (deriv && nrow(dw.drho)!=nrow(x)) { + w1 <- dw.drho + dw.drho <- matrix(0,nrow(x),ncol(w1)) + dw.drho[good,] <- w1 + } + + wtdmu <- if (intercept) sum(weights * y)/sum(weights) else linkinv(offset) @@ -795,16 +830,99 @@ gam.fit3 <- function (x, y, sp, Eb,UrS=list(), list(coefficients = coef, residuals = residuals, fitted.values = mu, family = family, linear.predictors = eta, deviance = dev, - null.deviance = nulldev, iter = iter, weights = wt, prior.weights = weights, - df.null = nulldf, y = y, converged = conv,pearson.warning = pearson.warning, + null.deviance = nulldev, iter = iter, weights = wt, working.weights=ww,prior.weights = weights, + df.null = nulldf, y = y, converged = conv,##pearson.warning = pearson.warning, boundary = boundary,D1=D1,D2=D2,P=P,P1=P1,P2=P2,trA=trA,trA1=trA1,trA2=trA2, GCV=GCV,GCV1=GCV1,GCV2=GCV2,GACV=GACV,GACV1=GACV1,GACV2=GACV2,UBRE=UBRE, UBRE1=UBRE1,UBRE2=UBRE2,REML=REML,REML1=REML1,REML2=REML2,rV=rV,db.drho=db.drho, + dw.drho=dw.drho, scale.est=scale.est,reml.scale= reml.scale,aic=aic.model,rank=oo$rank.est,K=Kmat) } ## end gam.fit3 +Vb.corr <- function(X,L,S,off,dw,w,rho,Vr,nth=0,scale.est=FALSE) { +## compute higher order Vb correction... +## If w is NULL then X should be root Hessian, and +## dw is treated as if it was 0, otherwise X should be model +## matrix. +## dw is derivative w.r.t. all the smoothing parameters and family parametres as if these +## were not linked, but not the scale parameter, of course. Vr includes scale uncertainty, +## if scale extimated... +## nth is the number of initial elements of rho that are not smoothing +## parameters, scale.est is TRUE is scale estimated + M <- length(off) ## number of penalty terms + if (scale.est) { + ## drop scale param from L, rho and Vr... + rho <- rho[-length(rho)] + if (!is.null(L)) L <- L[-nrow(L),-ncol(L),drop=FALSE] + Vr <- Vr[-nrow(Vr),-ncol(Vr),drop=FALSE] + } + ## ??? rho0??? + lambda <- if (is.null(L)) exp(rho) else exp(L[1:M,,drop=FALSE]%*%rho) + + ## Re-create the Hessian, if is.null(w) then X assumed to be root + ## unpenalized Hessian... 
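  ## A minimal sketch (not from the patch itself) of what the next few lines assemble:
  ## the penalized Hessian H = X'WX + sum_i lambda[i]*S[[i]], with each penalty block
  ## added at the coefficients starting at off[i] (assuming nth = 0, i.e. no extra
  ## family parameters):
  ##   H <- t(X) %*% (w * X)
  ##   for (i in seq_along(S)) {
  ##     ind <- off[i] + 1:ncol(S[[i]]) - 1
  ##     H[ind, ind] <- H[ind, ind] + lambda[i] * S[[i]]
  ##   }
  ## Vb.corr then differentiates chol(H) w.r.t. the log smoothing parameters to obtain
  ## the higher order part of the smoothing parameter uncertainty correction.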
+ H <- if (is.null(w)) crossprod(X) else H <- t(X)%*%(w*X) + if (M>0) for (i in 1:M) { + ind <- off[i] + 1:ncol(S[[i]]) - 1 + H[ind,ind] <- H[ind,ind] + lambda[i+nth] * S[[i]] + } + + R <- try(chol(H),silent=TRUE) ## get its Choleski factor. + if (inherits(R,"try-error")) return(0) ## bail out as Hessian insufficiently well conditioned + + ## Create dH the derivatives of the hessian w.r.t. (all) the smoothing parameters... + dH <- list() + if (length(lambda)>0) for (i in 1:length(lambda)) { + ## If w==NULL use constant H approx... + dH[[i]] <- if (is.null(w)) H*0 else t(X)%*%(dw[,i]*X) + if (i>nth) { + ind <- off[i-nth] + 1:ncol(S[[i-nth]]) - 1 + dH[[i]][ind,ind] <- dH[[i]][ind,ind] + lambda[i]*S[[i-nth]] + } + } + ## If L supplied then dH has to be re-weighted to give + ## derivatives w.r.t. optimization smoothing params. + if (!is.null(L)) { + dH1 <- dH;dH <- list() + if (length(rho)>0) for (j in 1:length(rho)) { + ok <- FALSE ## dH[[j]] not yet created + if (nrow(L)>0) for (i in 1:nrow(L)) if (L[i,j]!=0.0) { + dH[[j]] <- if (ok) dH[[j]] + dH1[[i]]*L[i,j] else dH1[[i]]*L[i,j] + ok <- TRUE + } + } + rm(dH1) + } ## dH now w.r.t. optimization parameters + + if (length(dH)==0) return(0) ## nothing to correct -gam.fit3.post.proc <- function(X,L,object) { + ## Get derivatives of Choleski factor w.r.t. the smoothing parameters + dR <- list() + for (i in 1:length(dH)) dR[[i]] <- dchol(dH[[i]],R) + rm(dH) + + ## need to transform all dR to dR^{-1} = -R^{-1} dR R^{-1}... + for (i in 1:length(dR)) dR[[i]] <- -t(forwardsolve(t(R),t(backsolve(R,dR[[i]])))) + + ## BUT: dR, now upper triangular, and it relates to RR' = Vb not R'R = Vb + ## in consequence of which Rz is the thing with the right distribution + ## and not R'z... + dbg <- FALSE + if (dbg) { ## debugging code + n.rep <- 10000;p <- ncol(R) + r <- rmvn(n.rep,rep(0,M),Vr) + b <- matrix(0,n.rep,p) + for (i in 1:n.rep) { + z <- rnorm(p) + if (M>0) for (j in 1:M) b[i,] <- b[i,] + dR[[j]]%*%z*(r[i,j]) + } + Vfd <- crossprod(b)/n.rep + } + + vcorr(dR,Vr,FALSE) ## NOTE: unscaled!! +} ## Vb.corr + +gam.fit3.post.proc <- function(X,L,S,off,object) { ## get edf array and covariance matrices after a gam fit. ## X is original model matrix, L the mapping from working to full sp scale <- if (object$scale.estimated) object$scale.est else object$scale @@ -817,7 +935,7 @@ gam.fit3.post.proc <- function(X,L,object) { edf1 <- 2*edf - rowSums(t(F)*F) ## alternative ## check on plausibility of scale (estimate) - if (object$scale.estimated&&!is.null(object$pearson.warning)) warning("Pearson scale estimate maybe unstable. See ?gam.scale.") + ##if (object$scale.estimated&&!is.null(object$pearson.warning)) warning("Pearson scale estimate maybe unstable. See ?gam.scale.") ## edf <- rowSums(PKt*t(sqrt(object$weights)*X)) ## Ve <- PKt%*%t(PKt)*object$scale ## frequentist cov @@ -831,20 +949,41 @@ gam.fit3.post.proc <- function(X,L,object) { if (!is.na(object$reml.scale)&&!is.null(object$db.drho)) { ## compute sp uncertainty correction M <- ncol(object$db.drho) ## transform to derivs w.r.t. 
working, noting that an extra final row of L - ## may be present, relating to scale parameter (for which db.drho is 0 since its a scale parameter) + ## may be present, relating to scale parameter (for which db.drho is 0 since it's a scale parameter) if (!is.null(L)) { object$db.drho <- object$db.drho%*%L[1:M,,drop=FALSE] M <- ncol(object$db.drho) } - ev <- eigen(object$outer.info$hess,symmetric=TRUE) - ind <- ev$values <= 0 - ev$values[ind] <- 0;ev$values[!ind] <- 1/sqrt(ev$values[!ind]) - rV <- (ev$values*t(ev$vectors))[,1:M] + ## extract cov matrix for log smoothing parameters... + ev <- eigen(object$outer.info$hess,symmetric=TRUE) + d <- ev$values;ind <- d <= 0 + d[ind] <- 0;d[!ind] <- 1/sqrt(d[!ind]) + rV <- (d*t(ev$vectors))[,1:M] ## root of cov matrix Vc <- crossprod(rV%*%t(object$db.drho)) - Vc <- Vb + Vc ## Bayesian cov matrix with sp uncertainty + ## set a prior precision on the smoothing parameters, but don't use it to + ## fit, only to regularize Cov matrix. exp(4*var^.5) gives approx + ## multiplicative range. e.g. var = 5.3 says parameter between .01 and 100 times + ## estimate. Avoids nonsense at `infinite' smoothing parameters. +# dpv <- rep(0,ncol(object$outer.info$hess)) +# dpv[1:M] <- 1/10 ## prior precision (1/var) on log smoothing parameters +# Vr <- chol2inv(chol(object$outer.info$hess + diag(dpv,ncol=length(dpv))))[1:M,1:M] +# Vc <- object$db.drho%*%Vr%*%t(object$db.drho) + d <- ev$values; d[ind] <- 0;d <- 1/sqrt(d+1/10) + Vr <- crossprod(d*t(ev$vectors)) + #Vc2 <- scale*Vb.corr(X,L,S,off,object$dw.drho,object$working.weights,log(object$sp),Vr) + ## Note that db.drho and dw.drho are derivatives w.r.t. full set of smoothing + ## parameters excluding any scale parameter, but Vr includes info for scale parameter + ## if it has been estiamted. + nth <- if (is.null(object$family$n.theta)) 0 else object$family$n.theta ## any parameters of family itself + Vc2 <- scale*Vb.corr(R,L,S,off,object$dw.drho,w=NULL,log(object$sp),Vr,nth,object$scale.estimated) + + Vc <- Vb + Vc + Vc2 ## Bayesian cov matrix with sp uncertainty ## finite sample size check on edf sanity... edf2 <- rowSums(Vc*crossprod(R))/scale - if (sum(edf2)>sum(edf1)) edf2 <- edf1 + if (sum(edf2)>sum(edf1)) { + #cat("\n edf2=",sum(edf2)," edf1=",sum(edf1)); + edf2 <- edf1 + } } else edf2 <- Vc <- NULL list(Vc=Vc,Vb=Vb,Ve=Ve,edf=edf,edf1=edf1,edf2=edf2,hat=hat,F=F,R=R) } ## gam.fit3.post.proc @@ -1651,11 +1790,15 @@ bfgs <- function(lsp,X,y,Eb,UrS,L,lsp0,offset,U1,Mp,family,weights, check.derivs <- FALSE;eps <- 1e-5 + uconv.ind <- rep(TRUE,ncol(B)) + for (i in 1:max.step) { ## get the trial step ... - step <- -drop(B%*%initial$grad) + step <- -drop(B%*%initial$grad) + ## following line messes up conditions under which Wolfe guarantees update... 
+ ## step[!uconv.ind] <- 0 ## don't move if apparently converged - don't do this ## unit.step <- step/sqrt(sum(step^2)) ## unit vector in step direction ms <- max(abs(step)) @@ -1798,7 +1941,7 @@ bfgs <- function(lsp,X,y,Eb,UrS,L,lsp0,offset,U1,Mp,family,weights, converged <- FALSE } if (converged) break - + ## uconv.ind <- abs(trial$grad) > score.scale*conv.tol*.1 initial <- trial initial$alpha <- 0 } @@ -2366,7 +2509,7 @@ negbin <- function (theta = stop("'theta' must be specified"), link = "log") { } environment(qf) <- environment(rd) <- environment(dvar) <- environment(d2var) <- - environment(variance) <- environment(validmu) <- + environment(d3var) <-environment(variance) <- environment(validmu) <- environment(ls) <- environment(dev.resids) <- environment(aic) <- environment(getTheta) <- env famname <- paste("Negative Binomial(", format(round(theta,3)), ")", sep = "") structure(list(family = famname, link = linktemp, linkfun = stats$linkfun, diff --git a/R/gam.fit4.r b/R/gam.fit4.r index 09596df..2cf1168 100644 --- a/R/gam.fit4.r +++ b/R/gam.fit4.r @@ -2,7 +2,6 @@ ## Routines for gam estimation beyond exponential family. - dDeta <- function(y,mu,wt,theta,fam,deriv=0) { ## What is available directly from the family are derivatives of the ## deviance and link w.r.t. mu. This routine converts these to the @@ -535,7 +534,7 @@ gam.fit4 <- function(x, y, sp, Eb,UrS=list(), Det2=as.double(dd$Deta2),Dth2=as.double(dd$Dth2),Det.th=as.double(dd$Detath), Det2.th=as.double(dd$Deta2th),Det3=as.double(dd$Deta3),Det.th2 = as.double(dd$Detath2), Det4 = as.double(dd$Deta4),Det3.th=as.double(dd$Deta3th), Deta2.th2=as.double(dd$Deta2th2), - beta=as.double(coef),b1=as.double(rep(0,ntot*ncol(x))), + beta=as.double(coef),b1=as.double(rep(0,ntot*ncol(x))),w1=rep(0,ntot*length(z)), D1=as.double(rep(0,ntot)),D2=as.double(rep(0,ntot^2)), P=as.double(0),P1=as.double(rep(0,ntot)),P2 = as.double(rep(0,ntot^2)), ldet=as.double(1-2*(scoreType=="ML")),ldet1 = as.double(rep(0,ntot)), @@ -551,6 +550,7 @@ gam.fit4 <- function(x, y, sp, Eb,UrS=list(), rV <- T %*% rV ## derivatives of coefs w.r.t. sps etc... db.drho <- if (deriv) T %*% matrix(oo$b1,ncol(x),ntot) else NULL + dw.drho <- if (deriv) matrix(oo$w1,length(z),ntot) else NULL Kmat <- matrix(0,nrow(x),ncol(x)) Kmat[good,] <- oo$X ## rV%*%t(K)%*%(sqrt(wf)*X) = F; diag(F) is edf array @@ -607,6 +607,11 @@ gam.fit4 <- function(x, y, sp, Eb,UrS=list(), ww <- wt <- rep.int(0, nobs) wt[good] <- wf ww[good] <- w + if (deriv && nrow(dw.drho)!=nrow(x)) { + w1 <- dw.drho + dw.drho <- matrix(0,nrow(x),ncol(w1)) + dw.drho[good,] <- w1 + } aic.model <- family$aic(y, mu, theta, weights, dev) # note: incomplete 2*edf needs to be added @@ -619,7 +624,7 @@ gam.fit4 <- function(x, y, sp, Eb,UrS=list(), df.null = nulldf, y = y, converged = conv, boundary = boundary, REML=REML,REML1=REML1,REML2=REML2, - rV=rV,db.drho=db.drho, + rV=rV,db.drho=db.drho,dw.drho=dw.drho, scale.est=scale,reml.scale=scale, aic=aic.model, rank=oo$rank.est, @@ -700,7 +705,7 @@ gam.fit5 <- function(x,y,lsp,Sl,weights=NULL,offset=NULL,deriv=2,family, ## get log likelihood, grad and Hessian (w.r.t. coefs - not s.p.s) ... 
ll <- family$ll(y,x,coef,weights,family,deriv=1) - ll0 <- ll$l - t(coef)%*%St%*%coef/2 + ll0 <- ll$l - (t(coef)%*%St%*%coef)/2 rank.checked <- FALSE ## not yet checked the intrinsic rank of problem rank <- q;drop <- NULL eigen.fix <- FALSE @@ -773,16 +778,36 @@ gam.fit5 <- function(x,y,lsp,Sl,weights=NULL,offset=NULL,deriv=2,family, coef1 <- coef + step ll <- family$ll(y,x,coef1,weights,family,deriv=1) ll1 <- ll$l - (t(coef1)%*%St%*%coef1)/2 - khalf <- 0 - while (ll1 < ll0 && khalf < 50) { ## step halve until it succeeds... - step <- step/2;coef1 <- coef + step + khalf <- 0;fac <- 2 + while (ll1 < ll0 && khalf < 25) { ## step halve until it succeeds... + step <- step/fac;coef1 <- coef + step ll <- family$ll(y,x,coef1,weights,family,deriv=0) ll1 <- ll$l - (t(coef1)%*%St%*%coef1)/2 if (ll1>=ll0) { ll <- family$ll(y,x,coef1,weights,family,deriv=1) + } else { ## abort if step has made no difference + if (max(abs(coef1-coef))==0) khalf <- 100 } khalf <- khalf + 1 + if (khalf>5) fac <- 5 } ## end step halve + + if (ll1 < ll0) { ## switch to steepest descent... + step <- -.5*drop(grad)*mean(abs(coef))/mean(abs(grad)) + khalf <- 0 + } + + while (ll1 < ll0 && khalf < 25) { ## step cut until it succeeds... + step <- step/10;coef1 <- coef + step + ll <- family$ll(y,x,coef1,weights,family,deriv=0) + ll1 <- ll$l - (t(coef1)%*%St%*%coef1)/2 + if (ll1>=ll0) { + ll <- family$ll(y,x,coef1,weights,family,deriv=1) + } else { ## abort if step has made no difference + if (max(abs(coef1-coef))==0) khalf <- 100 + } + khalf <- khalf + 1 + } if (ll1 >= ll0||iter==control$maxit) { ## step ok. Accept and test coef <- coef + step @@ -1017,7 +1042,7 @@ gam.fit5 <- function(x,y,lsp,Sl,weights=NULL,offset=NULL,deriv=2,family, ret } ## end of gam.fit5 -gam.fit5.post.proc <- function(object,Sl,L) { +gam.fit5.post.proc <- function(object,Sl,L,S,off) { ## object is object returned by gam.fit5, Sl is penalty object, L maps working sp ## vector to full sp vector ## Computes: @@ -1089,13 +1114,27 @@ gam.fit5.post.proc <- function(object,Sl,L) { } ## compute the smoothing parameter uncertainty correction... - if (!is.null(object$outer.info$hess)) { - ev <- eigen(object$outer.info$hess,symmetric=TRUE) - ind <- ev$values <= 0 - ev$values[ind] <- 0;ev$values[!ind] <- 1/sqrt(ev$values[!ind]) + if (!is.null(object$outer.info$hess)) { if (!is.null(L)) object$db.drho <- object$db.drho%*%L ## transform to derivs w.r.t. working - Vc <- crossprod((ev$values*t(ev$vectors))%*%t(object$db.drho)) - Vc <- Vb + Vc ## Bayesian cov matrix with sp uncertainty + ev <- eigen(object$outer.info$hess,symmetric=TRUE) + d <- ev$values;ind <- d <= 0 + d[ind] <- 0;d[!ind] <- 1/sqrt(d[!ind]) + Vc <- crossprod((d*t(ev$vectors))%*%t(object$db.drho)) + #dpv <- rep(0,ncol(object$outer.info$hess));M <- length(off) + #dpv[1:M] <- 1/100 ## prior precision (1/var) on log smoothing parameters + #Vr <- chol2inv(chol(object$outer.info$hess + diag(dpv,ncol=length(dpv))))[1:M,1:M] + #Vc <- object$db.drho%*%Vr%*%t(object$db.drho) + + #dpv[1:M] <- 1/10 ## prior precision (1/var) on log smoothing parameters + #Vr <- chol2inv(chol(object$outer.info$hess + diag(dpv,ncol=length(dpv))))[1:M,1:M] + #M <- length(off) + d <- ev$values; d[ind] <- 0; + d <- d + 1/50 #d[1:M] <- d[1:M] + 1/50 + d <- 1/sqrt(d) + Vr <- crossprod(d*t(ev$vectors)) + #Vc2 <- Vb.corr(R,L,S,off,dw=NULL,w=NULL,log(object$sp),Vr) + + Vc <- Vb + Vc #+ Vc2 ## Bayesian cov matrix with sp uncertainty ## reverse the various re-parameterizations... 
} else Vc <- Vb Vc <- Sl.repara(object$rp,Vc,inverse=TRUE) @@ -1112,8 +1151,14 @@ gam.fit5.post.proc <- function(object,Sl,L) { ## model. This is larger than edf2 should be, because of bias correction variability, ## but is bounded in a way that is not *guaranteed* for edf2. Note that ## justification only applies to sum(edf1/2) not elementwise + if (!is.null(object$outer.info$hess)) { + ## second correction term is easier computed in original parameterization... + Vc2 <- Vb.corr(R,L,S,off,dw=NULL,w=NULL,log(object$sp),Vr) + Vc <- Vc + Vc2 + } edf1 <- 2*edf - rowSums(t(F)*F) - edf2 <- diag(Vc%*%crossprod(R)) + #edf2 <- diag(Vc%*%crossprod(R)) + edf2 <- rowSums(Vc*crossprod(R)) if (sum(edf2)>sum(edf1)) edf2 <- edf1 ## note hat not possible here... list(Vc=Vc,Vb=Vb,Ve=Ve,edf=edf,edf1=edf1,edf2=edf2,F=F,R=R) diff --git a/R/gamm.r b/R/gamm.r index 6304bc6..167b408 100755 --- a/R/gamm.r +++ b/R/gamm.r @@ -1195,9 +1195,8 @@ gamm <- function(formula,random=NULL,correlation=NULL,family=gaussian(),data=lis # parts of the smooth terms are treated as random effects. The onesided formula random defines additional # random terms. correlation describes the correlation structure. This routine is basically an interface # between the basis constructors provided in mgcv and the gammPQL routine used to estimate the model. -# NOTE: need to fill out the gam object properly -{ - ## if (!require("nlme")) stop("gamm() requires package nlme to be installed") +{ if (inherits(family,"extended.family")) warning("family are not designed for use with gamm!") + control <- do.call("lmeControl",control) # check that random is a named list if (!is.null(random)) diff --git a/R/jagam.r b/R/jagam.r index f9db89f..b19126a 100644 --- a/R/jagam.r +++ b/R/jagam.r @@ -75,7 +75,14 @@ jini <- function(G,lambda) { X[(nobs+1):(nobs+jj),uoff[i]:(uoff[i]+ncol(S)-1)] <- S nobs <- nobs + jj } - qr.coef(qr(X),z) + ## we need some idea of initial coeffs and some idea of + ## associated standard error... + qrx <- qr(X,LAPACK=TRUE) + rp <- qrx$pivot;rp[rp] <- 1:ncol(X) + Ri <- backsolve(qr.R(qrx),diag(1,nrow=ncol(X)))[rp,] + beta <- qr.coef(qrx,z) + se <- sqrt(rowSums(Ri^2))*sqrt(sum((z-X%*%beta)^2)/nrow(X)) + list(beta=beta,se=se) } ## jini jagam <- function(formula,family=gaussian,data=list(),file,weights=NULL,na.action, @@ -154,11 +161,22 @@ sp.prior = "gamma",diagonalize=FALSE) { if (use.weights) jags.stuff$w <- weights if (family$family == "binomial") jags.stuff$y <- G$y*weights ## JAGS not expecting observed prob!! + + ## get initial values, for use by JAGS, and to guess suitable values for + ## uninformative priors... + + lambda <- initial.spg(G$X,G$y,G$w,family,G$S,G$off,G$L) ## initial sp values + jags.ini <- list() + lam <- if (is.null(G$L)) lambda else G$L%*%lambda + jin <- jini(G,lam) + jags.ini$b <- jin$beta + prior.tau <- signif(0.01/(abs(jin$beta) + jin$se)^2,2) ## set the fixed effect priors... if (G$nsdf>0) { - cat(" ## Parameteric effect priors CHECK tau is appropriate!\n",file=file,append=TRUE) - cat(" for (i in 1:",G$nsdf,") { b[i] ~ dnorm(0,0.001) }\n",file=file,append=TRUE,sep="") + ptau <- min(prior.tau[1:G$nsdf]) + cat(" ## Parametric effect priors CHECK tau=1/",signif(1/sqrt(ptau),2),"^2 is appropriate!\n",file=file,append=TRUE,sep="") + cat(" for (i in 1:",G$nsdf,") { b[i] ~ dnorm(0,",ptau,") }\n",file=file,append=TRUE,sep="") } ## Work through smooths. 
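A minimal sketch (not part of the patch) of how the data-dependent prior precisions written above behave; beta.hat and se stand in for the output of jini():

   beta.hat <- c(0.2, 3.1)   ## hypothetical initial coefficients
   se       <- c(0.1, 0.9)   ## and rough standard errors, as returned by jini()
   prior.tau <- signif(0.01/(abs(beta.hat) + se)^2, 2)  ## the rule used in the hunk above
   1/sqrt(prior.tau)         ## implied prior sd: about 3 and 40, i.e. roughly 10*(|beta.hat|+se)

so the dnorm(0, tau) priors stay vague, but are scaled to the data rather than being the old fixed dnorm(0, 0.001).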
@@ -191,9 +209,10 @@ sp.prior = "gamma",diagonalize=FALSE) { if (seperable) { b0 <- G$smooth[[i]]$first.para if (M==0) { - cat(" ## Note fixed vague prior, CHECK tau...\n",file=file,append=TRUE,sep="") + cat(" ## Note fixed vague prior, CHECK tau = 1/",signif(1/sqrt(ptau),2),"^2...\n",file=file,append=TRUE,sep="") b1 <- G$smooth[[i]]$last.para - cat(" for (i in ",b0,":",b1,") { b[i] ~ dnorm(0, 1e-6) }\n",file=file,append=TRUE,sep="") + ptau <- min(prior.tau[b0:b1]) + cat(" for (i in ",b0,":",b1,") { b[i] ~ dnorm(0,",ptau,") }\n",file=file,append=TRUE,sep="") } else for (j in 1:M) { D <- diag(G$smooth[[i]]$S[[j]]) > 0 b1 <- sum(as.numeric(D)) + b0 - 1 @@ -222,10 +241,7 @@ sp.prior = "gamma",diagonalize=FALSE) { } ## smoothing penalties finished ## Write the smoothing parameter prior code, using L if it exists. - lambda <- initial.spg(G$X,G$y,G$w,family,G$S,G$off,G$L) ## initial sp values - jags.ini <- list() - lam <- if (is.null(G$L)) lambda else G$L%*%lambda - jags.ini$b <- jini(G,lam) + cat(" ## smoothing parameter priors CHECK...\n",file=file,append=TRUE,sep="") if (is.null(G$L)) { if (sp.prior=="log.uniform") { diff --git a/R/mgcv.r b/R/mgcv.r index faa0542..ba0de77 100755 --- a/R/mgcv.r +++ b/R/mgcv.r @@ -398,7 +398,10 @@ gam.side <- function(sm,Xp,tol=.Machine$double.eps^.5,with.pen=FALSE) # missed. # Note that with.pen is quite extreme, since you then pretty much only pick # up dependencies in the null spaces -{ m <- length(sm) +{ if (!with.pen) { ## check that's possible and reset if not! + with.pen <- nrow(Xp) < ncol(Xp) + sum(unlist(lapply(sm,function(x) ncol(x$X)))) + } + m <- length(sm) if (m==0) return(sm) v.names<-array("",0);maxDim<-1 for (i in 1:m) { ## collect all term names and max smooth `dim' @@ -736,7 +739,7 @@ olid <- function(X,nsdf,pstart,flpi,lpi) { gam.setup.list <- function(formula,pterms, - data=stop("No data supplied to gam.setup"),knots=NULL,sp=NULL, + data=stop("No data supplied to gam.setup"),knots=NULL,sp=NULL, min.sp=NULL,H=NULL,absorb.cons=TRUE,sparse.cons=0,select=FALSE,idLinksBases=TRUE, scale.penalty=TRUE,paraPen=NULL,gamm.call=FALSE,drop.intercept=FALSE) { ## version of gam.setup for when gam is called with a list of formulae, @@ -863,6 +866,8 @@ gam.setup.list <- function(formula,pterms, G } ## gam.setup.list + + gam.setup <- function(formula,pterms, data=stop("No data supplied to gam.setup"),knots=NULL,sp=NULL, min.sp=NULL,H=NULL,absorb.cons=TRUE,sparse.cons=0,select=FALSE,idLinksBases=TRUE, @@ -910,10 +915,6 @@ gam.setup <- function(formula,pterms, m <- 0 } else m <- length(split$smooth.spec) # number of smooth terms - #pmf <- data - #pmf$formula <- split$pf - #pterms <- attr(model.frame(split$pf,data,drop.unused.levels=TRUE),"terms") # pmf contains all data for parametric part - G <- list(m=m,min.sp=min.sp,H=H,pearson.extra=0, dev.extra=0,n.true=-1,pterms=pterms) ## dev.extra gets added to deviance if REML/ML used in gam.fit3 @@ -958,7 +959,7 @@ gam.setup <- function(formula,pterms, G$smooth <- list() G$S <- list() - if (gamm.call) { ## flag that this is a call from gamm --- some smoothers need to now! + if (gamm.call) { ## flag that this is a call from gamm --- some smoothers need to know! 
if (m>0) for (i in 1:m) attr(split$smooth.spec[[i]],"gamm") <- TRUE } @@ -1581,9 +1582,9 @@ gam.outer <- function(lsp,fscale,family,control,method,optimizer,criterion,scale object$control <- control if (inherits(family,"general.family")) { - mv <- gam.fit5.post.proc(object,G$Sl,G$L) + mv <- gam.fit5.post.proc(object,G$Sl,G$L,G$S,G$off) object$coefficients <- Sl.initial.repara(G$Sl,object$coefficients,inverse=TRUE) - } else mv <- gam.fit3.post.proc(G$X,G$L,object) + } else mv <- gam.fit3.post.proc(G$X,G$L,G$S,G$off,object) ## note: use of the following in place of Vp appears to mess up p-values for smooths, ## but doesn't change r.e. p-values of course. if (!is.null(mv$Vc)) object$Vc <- mv$Vc @@ -1637,7 +1638,7 @@ estimate.gam <- function (G,method,optimizer,control,in.out,scale,gamma,...) { method <- "REML" ## any method you like as long as it's REML G$Sl <- Sl.setup(G) ## prepare penalty sequence - G$X <- Sl.initial.repara(G$Sl,G$X) ## re-parameterize accordingly + G$X <- Sl.initial.repara(G$Sl,G$X,both.sides=FALSE) ## re-parameterize accordingly ## make sure its BFGS if family only supplies these derivatives if (!is.null(G$family$available.derivs)&&G$family$available.derivs==1) optimizer <- c("outer","bfgs") } @@ -1771,7 +1772,10 @@ estimate.gam <- function (G,method,optimizer,control,in.out,scale,gamma,...) { } lsp <- c(lsp,log.scale) ## append log initial scale estimate to lsp ## extend G$L, if present... - if (!is.null(G$L)) G$L <- cbind(rbind(G$L,rep(0,ncol(G$L))),c(rep(0,nrow(G$L)),1)) + if (!is.null(G$L)) { + G$L <- cbind(rbind(G$L,rep(0,ncol(G$L))),c(rep(0,nrow(G$L)),1)) + #attr(G$L,"scale") <- TRUE ## indicates scale estimated as sp + } if (!is.null(G$lsp0)) G$lsp0 <- c(G$lsp0,0) } ## check if there are extra parameters to estimate @@ -1785,7 +1789,10 @@ estimate.gam <- function (G,method,optimizer,control,in.out,scale,gamma,...) { if (!is.null(G$L)&&nth>0) { L <- rbind(cbind(diag(nth),matrix(0,nth,ncol(G$L))), cbind(matrix(0,nrow(G$L),nth),G$L)) + #sat <- attr(G$L,"scale") G$L <- L + #attr(G$L,"scale") <- sat + #attr(G$L,"not.sp") <- nth ## first not.sp params are not smoothing params } if (!is.null(G$lsp0)) G$lsp0 <- c(th0*0,G$lsp0) } else nth <- 0 @@ -2091,7 +2098,7 @@ gam.control <- function (nthreads=1,irls.reg=0.0,epsilon = 1e-7, maxit = 200, rank.tol=.Machine$double.eps^0.5, nlm=list(),optim=list(),newton=list(),outerPIsteps=0, idLinksBases=TRUE,scalePenalty=TRUE, - keepData=FALSE,scale.est="pearson") + keepData=FALSE,scale.est="fletcher") # Control structure for a gam. # irls.reg is the regularization parameter to use in the GAM fitting IRLS loop. # epsilon is the tolerance to use in the IRLS MLE loop. maxit is the number @@ -2101,7 +2108,7 @@ gam.control <- function (nthreads=1,irls.reg=0.0,epsilon = 1e-7, maxit = 200, # rank.tol is the tolerance to use for rank determination # outerPIsteps is the number of performance iteration steps used to intialize # outer iteration -{ scale.est <- match.arg(scale.est,c("robust","pearson","deviance")) +{ scale.est <- match.arg(scale.est,c("fletcher","pearson","deviance")) if (!is.numeric(nthreads) || nthreads <1) stop("nthreads must be a positive integer") if (!is.numeric(irls.reg) || irls.reg <0.0) stop("IRLS regularizing parameter must be a non-negative number.") if (!is.numeric(epsilon) || epsilon <= 0) @@ -3089,8 +3096,6 @@ residuals.gam <-function(object, type = "deviance",...) ## Start of anova and summary (with contributions from Henric Nilsson) .... 
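Relating to the gam.control() change above ("fletcher" is now the default scale.est), a minimal sketch of the Fletcher (2012) estimator as coded in gam.fit3 earlier in this patch, ignoring the dev.extra/n.true adjustments; fam$dvar is the derivative of the variance function that mgcv attaches to the family object, and the commented gam() call is only an assumed usage example:

   fletcher.scale <- function(y, mu, w, fam, edf) {
   ## Pearson estimate divided by 1 + s.bar, following Fletcher (2012)
     V <- fam$variance(mu)
     pearson <- sum(w * (y - mu)^2 / V) / (length(y) - edf)  ## residual df = n - trA
     s.bar <- mean(fam$dvar(mu) * (y - mu) * sqrt(w) / V)
     if (is.finite(s.bar)) pearson/(1 + s.bar) else pearson
   }
   ## selecting a different estimator explicitly (schematic):
   ## b <- gam(y ~ s(x), family = Gamma(link = "log"),
   ##          control = gam.control(scale.est = "deviance"))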
- - smoothTest <- function(b,X,V,eps=.Machine$double.eps^.5) { ## Forms Cox, Koh, etc type test statistic, and ## obtains null distribution by simulation... @@ -3323,6 +3328,8 @@ reTest <- function(b,m) { list(stat=stat,pval=pval,rank=rank) } ## end reTest + + testStat <- function(p,X,V,rank=NULL,type=0,res.df= -1) { ## Implements Wood (2013) Biometrika 100(1), 221-228 ## The type argument specifies the type of truncation to use. @@ -3340,7 +3347,9 @@ testStat <- function(p,X,V,rank=NULL,type=0,res.df= -1) { V <- R%*%V[qrx$pivot,qrx$pivot,drop=FALSE]%*%t(R) V <- (V + t(V))/2 ed <- eigen(V,symmetric=TRUE) - + ## remove possible ambiguity from statistic... + siv <- sign(ed$vectors[1,]);siv[siv==0] <- 1 + ed$vectors <- sweep(ed$vectors,2,siv,"*") k <- max(0,floor(rank)) nu <- abs(rank - k) ## fractional part of supplied edf @@ -3385,15 +3394,23 @@ testStat <- function(p,X,V,rank=NULL,type=0,res.df= -1) { B <- ev%*%B%*%ev eb <- eigen(B,symmetric=TRUE) rB <- eb$vectors%*%diag(sqrt(eb$values))%*%t(eb$vectors) + vec1 <- vec + vec1[,k:k1] <- t(rB%*%diag(c(-1,1))%*%t(vec[,k:k1])) vec[,k:k1] <- t(rB%*%t(vec[,k:k1])) } else { - if (k==0) vec <- t(t(vec)*sqrt(1/ed$val[1])) else - vec <- t(t(vec)/sqrt(ed$val[1:k])) + vec1 <- vec <- if (k==0) t(t(vec)*sqrt(1/ed$val[1])) else + t(t(vec)/sqrt(ed$val[1:k])) if (k==1) rank <- 1 } - + ## there is an ambiguity in the choise of test statistic, leading to slight + ## differences in the p-value computation depending on which of 2 alternatives + ## is arbitrarily selected. Following allows both to be computed and p-values + ## averaged (can't average test stat as dist then unknown) d <- t(vec)%*%(R%*%p) d <- sum(d^2) + d1 <- t(vec1)%*%(R%*%p) + d1 <- sum(d1^2) + ##d <- d1 ## uncomment to avoid averaging rank1 <- rank ## rank for lower tail pval computation below @@ -3408,15 +3425,15 @@ testStat <- function(p,X,V,rank=NULL,type=0,res.df= -1) { val[k1] <- (rp - val[k]) } - if (res.df <= 0) pval <- liu2(d,val) else ## pval <- davies(d,val)$Qq else - pval <- simf(d,val,res.df) + if (res.df <= 0) pval <- (liu2(d,val) + liu2(d1,val))/2 else ## pval <- davies(d,val)$Qq else + pval <- (simf(d,val,res.df) + simf(d1,val,res.df))/2 } else { pval <- 2 } ## integer case still needs computing, also liu/pearson approx only good in ## upper tail. In lower tail, 2 moment approximation is better (Can check this ## by simply plotting the whole interesting range as a contour plot!) 
if (pval > .5) { - if (res.df <= 0) pval <- pchisq(d,df=rank1,lower.tail=FALSE) else - pval <- pf(d/rank1,rank1,res.df,lower.tail=FALSE) + if (res.df <= 0) pval <- (pchisq(d,df=rank1,lower.tail=FALSE)+pchisq(d1,df=rank1,lower.tail=FALSE))/2 else + pval <- (pf(d/rank1,rank1,res.df,lower.tail=FALSE)+pf(d1/rank1,rank1,res.df,lower.tail=FALSE))/2 } list(stat=d,pval=min(1,pval),rank=rank) } ## end of testStat @@ -3837,17 +3854,21 @@ gam.vcomp <- function(x,rescale=TRUE,conf.lev=.95) { ok <- TRUE } } else { ok <- TRUE} ## no id so proceed - if (ok) for (j in 1:length(x$smooth[[i]]$S.scale)) { - if (x$smooth[[i]]$sp[j]<0) { ## sp not supplied - x$sp[k] <- x$sp[k] / x$smooth[[i]]$S.scale[j] - k <- k + 1 - if (kf>0) { + if (ok) { + if (length(x$smooth[[i]]$S.scale)!=length(x$smooth[[i]]$S)) + warning("S.scale vector doesn't match S list - please report to maintainer") + for (j in 1:length(x$smooth[[i]]$S.scale)) { + if (x$smooth[[i]]$sp[j]<0) { ## sp not supplied + x$sp[k] <- x$sp[k] / x$smooth[[i]]$S.scale[j] + k <- k + 1 + if (kf>0) { + x$full.sp[kf] <- x$full.sp[kf] / x$smooth[[i]]$S.scale[j] + kf <- kf + 1 + } + } else { ## sp supplied x$full.sp[kf] <- x$full.sp[kf] / x$smooth[[i]]$S.scale[j] kf <- kf + 1 - } - } else { ## sp supplied - x$full.sp[kf] <- x$full.sp[kf] / x$smooth[[i]]$S.scale[j] - kf <- kf + 1 + } } } else { ## this id already dealt with, but full.sp not scaled yet ii <- idxi[idx%in%x$smooth[[i]]$id] ## smooth prototype @@ -3855,7 +3876,7 @@ gam.vcomp <- function(x,rescale=TRUE,conf.lev=.95) { x$full.sp[kf] <- x$full.sp[kf] / x$smooth[[ii]]$S.scale[j] kf <- kf + 1 } - } + } } ## finished rescaling } ## variance components (original scale) @@ -3971,8 +3992,8 @@ mroot <- function(A,rank=NULL,method="chol") # correct rank if it isn't known in advance. { if (is.null(rank)) rank <- 0 if (!isTRUE(all.equal(A,t(A)))) stop("Supplied matrix not symmetric") - if (method=="svd") - { um<-La.svd(A) + if (method=="svd") { + um <- La.svd(A) if (sum(um$d!=sort(um$d,decreasing=TRUE))>0) stop("singular values not returned in order") if (rank < 1) # have to work out rank @@ -3985,14 +4006,17 @@ mroot <- function(A,rank=NULL,method="chol") d<-um$d[1:rank]^0.5 return(t(t(um$u[,1:rank])*as.vector(d))) # note recycling rule used for efficiency } else - if (method=="chol") - { op <- options(warn=-1) ## don't want to be warned it's not +ve def - L <- chol(A,pivot=TRUE) - options(op) ## reset default warnings + if (method=="chol") { + ## don't want to be warned it's not +ve def... + L <- suppressWarnings(chol(A,pivot=TRUE,tol=0)) piv <- order(attr(L,"pivot")) - if (rank < 1) rank <- attr(L,"rank") - L <- L[,piv,drop=FALSE];L <- t(L[1:rank,,drop=FALSE]) - #if (rank <= 1) dim(L) <- c(nrow(A),1) + ## chol does not work as documented (reported), have to explicitly zero + ## the trailing block... 
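      ## A toy check (not from the patch) of the behaviour the comment above describes,
      ## for a rank deficient A (here 3 x 3 of rank 2):
      ##   A <- tcrossprod(matrix(rnorm(6), 3, 2))
      ##   L <- suppressWarnings(chol(A, pivot = TRUE, tol = 0))
      ##   piv <- order(attr(L, "pivot")); r <- attr(L, "rank")
      ##   L[(r + 1):nrow(L), (r + 1):ncol(L)] <- 0   ## the fix applied just below
      ##   range(crossprod(L)[piv, piv] - A)          ## ~0 only once the trailing block is zeroed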
+ r <- attr(L,"rank") + p <- ncol(L) + if (r < p) L[(r+1):p,(r+1):p] <- 0 + if (rank < 1) rank <- r + L <- L[,piv,drop=FALSE]; L <- t(L[1:rank,,drop=FALSE]) return(L) } else stop("method not recognised.") @@ -4092,8 +4116,8 @@ initial.spg <- function(x,y,weights,family,S,off,L=NULL,lsp0=NULL,type=1, } else mustart <- mukeep if (inherits(family,"extended.family")) { theta <- family$getTheta() - w <- .5 * family$Dd(y,mustart,theta,weights)$EDmu2*family$mu.eta(family$linkfun(mustart))^2 - } else w <- as.numeric(weights*family$mu.eta(family$linkfun(mustart))^2/family$variance(mustart)) + w <- .5 * drop(family$Dd(y,mustart,theta,weights)$EDmu2*family$mu.eta(family$linkfun(mustart))^2) + } else w <- drop(weights*family$mu.eta(family$linkfun(mustart))^2/family$variance(mustart)) w <- sqrt(w) if (type==1) { ## what PI would have used lambda <- initial.sp(w*x,S,off) @@ -4117,15 +4141,17 @@ initial.spg <- function(x,y,weights,family,S,off,L=NULL,lsp0=NULL,type=1, } -initial.sp <- function(X,S,off,expensive=FALSE) +initial.sp <- function(X,S,off,expensive=FALSE,XX=FALSE) # Find initial smoothing parameter guesstimates based on model matrix X # and penalty list S. off[i] is the index of the first parameter to # which S[[i]] applies, since S[[i]]'s only store non-zero submatrix of # penalty coefficient matrix. +# if XX==TRUE then X contains X'X, not X! { n.p <- length(S) + if (XX) expensive <- FALSE def.sp <- array(0,n.p) if (n.p) { - ldxx <- colSums(X*X) # yields diag(t(X)%*%X) + ldxx <- if (XX) diag(X) else colSums(X*X) # yields diag(t(X)%*%X) ldss <- ldxx*0 # storage for combined penalty l.d. if (expensive) St <- matrix(0,ncol(X),ncol(X)) pen <- rep(FALSE,length(ldxx)) # index of what actually gets penalized @@ -4162,7 +4188,7 @@ initial.sp <- function(X,S,off,expensive=FALSE) } } as.numeric(def.sp) -} +} ## initial.sp diff --git a/R/misc.r b/R/misc.r index c2591ed..b9f9ca0 100644 --- a/R/misc.r +++ b/R/misc.r @@ -52,6 +52,78 @@ mvn.ll <- function(y,X,beta,dbeta=NULL) { list(l=oo$ll,lb=oo$lb,lbb=matrix(oo$lbb,nb,nb),dH=dH) } +## discretized covariate routines... + +XWXd <- function(X,w,k,ts,dt,v,qc,nthreads=1,drop=NULL,ar.stop=-1,ar.row=-1,ar.w=-1) { +## Form X'WX given weights in w and X in compressed form in list X. +## each element of X is a (marginal) model submatrix. Full version +## is given by X[[i]][k[,i],]. list X relates to length(ds) separate +## terms. ith term starts at matrix ts[i] and has dt[i] marginal matrices. +## Terms with several marginals are tensor products and may have +## constraints (if qc[i]>1), stored as a householder vector in v[[i]]. +## check ts and k index start (assumed 1 here) +## if drop is non-NULL it contains index of rows/cols to drop from result + m <- unlist(lapply(X,nrow));p <- unlist(lapply(X,ncol)) + nx <- length(X);nt <- length(ts) + n <- length(w);pt <- 0; + for (i in 1:nt) pt <- pt + prod(p[ts[i]:(ts[i]+dt[i]-1)]) - as.numeric(qc[i]>0) + oo <- .C(C_XWXd,XWX =as.double(rep(0,pt^2)),X= as.double(unlist(X)),w=as.double(w), + k=as.integer(k-1),m=as.integer(m),p=as.integer(p), n=as.integer(n), + ns=as.integer(nx), ts=as.integer(ts-1), as.integer(dt), nt=as.integer(nt), + v = as.double(unlist(v)),qc=as.integer(qc),nthreads=as.integer(nthreads), + ar.stop=as.integer(ar.stop-1),ar.row=as.integer(ar.row-1),ar.weights=as.double(ar.w)) + if (is.null(drop)) matrix(oo$XWX,pt,pt) else matrix(oo$XWX,pt,pt)[-drop,-drop] +} ## XWXd + +XWyd <- function(X,w,y,k,ts,dt,v,qc,drop=NULL,ar.stop=-1,ar.row=-1,ar.w=-1) { +## X'Wy... 
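## A small illustration (not from the patch) of the compressed storage that XWXd and
## XWyd share: each marginal X[[i]] is evaluated only at the unique covariate rows and
## k[,i] indexes back to the full data, so the dense marginal is X[[i]][k[,i],] and
## never needs to be formed. Toy one-term example, with a quadratic basis standing in
## for a smooth basis:
##   x  <- round(runif(1000), 2)        ## covariate with many tied values
##   xu <- unique(x); k <- match(x, xu) ## unique rows plus index into them
##   Xu <- cbind(1, xu, xu^2)           ## 'marginal' evaluated on unique rows only
##   all.equal(Xu[k, ], cbind(1, x, x^2), check.attributes = FALSE)  ## TRUE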
+ m <- unlist(lapply(X,nrow));p <- unlist(lapply(X,ncol)) + nx <- length(X);nt <- length(ts) + n <- length(w);pt <- 0; + for (i in 1:nt) pt <- pt + prod(p[ts[i]:(ts[i]+dt[i]-1)]) - as.numeric(qc[i]>0) + oo <- .C(C_XWyd,XWy=rep(0,pt),y=as.double(y),X=as.double(unlist(X)),w=as.double(w),k=as.integer(k-1), + m=as.integer(m),p=as.integer(p),n=as.integer(n), nx=as.integer(nx), ts=as.integer(ts-1), + dt=as.integer(dt),nt=as.integer(nt),v=as.double(unlist(v)),qc=as.integer(qc), + ar.stop=as.integer(ar.stop-1),ar.row=as.integer(ar.row-1),ar.weights=as.double(ar.w)) + if (is.null(drop)) oo$XWy else oo$XWy[-drop] +} ## XWyd + +Xbd <- function(X,beta,k,ts,dt,v,qc,drop=NULL) { +## note that drop may contain the index of columns of X to drop before multiplying by beta. +## equivalently we can insert zero elements into beta in the appropriate places. + n <- nrow(k);m <- unlist(lapply(X,nrow));p <- unlist(lapply(X,ncol)) + nx <- length(X);nt <- length(ts) + if (!is.null(drop)) { + b <- rep(0,length(beta)+length(drop)) + b[-drop] <- beta + beta <- b + } + oo <- .C(C_Xbd,f=as.double(rep(0,n)),beta=as.double(beta),X=as.double(unlist(X)),k=as.integer(k-1), + m=as.integer(m),p=as.integer(p), n=as.integer(n), nx=as.integer(nx), ts=as.integer(ts-1), + as.integer(dt), as.integer(nt),as.double(unlist(v)),as.integer(qc)) + oo$f +} ## Xbd + +dchol <- function(dA,R) { +## if dA contains matrix dA/dx where R is chol factor s.t. R'R = A +## then this routine returns dR/dx... + p <- ncol(R) + oo <- .C(C_dchol,dA=as.double(dA),R=as.double(R),dR=as.double(R*0),p=as.integer(ncol(R))) + return(matrix(oo$dR,p,p)) +} ## dchol + +vcorr <- function(dR,Vr,trans=TRUE) { +## Suppose b = sum_k op(dR[[k]])%*%z*r_k, z ~ N(0,Ip), r ~ N(0,Vr). vcorr returns cov(b). +## dR is a list of p by p matrices. 'op' is 't' if trans=TRUE and I() otherwise. + p <- ncol(dR[[1]]) + M <- if (trans) ncol(Vr) else -ncol(Vr) ## sign signals transpose or not to C code + if (abs(M)!=length(dR)) stop("internal error in vcorr, please report to simon.wood@r-project.org") + oo <- .C(C_vcorr,dR=as.double(unlist(dR)),Vr=as.double(Vr),Vb=as.double(rep(0,p*p)), + p=as.integer(p),M=as.integer(M)) + return(matrix(oo$Vb,p,p)) +} ## vcorr + + pinv <- function(X,svd=FALSE) { ## a pseudoinverse for n by p, n>p matrices qrx <- qr(X,tol=0,LAPACK=TRUE) @@ -93,7 +165,7 @@ pqr2 <- function(x,nt=1,nb=30) { pbsi <- function(R,nt=1,copy=TRUE) { ## parallel back substitution inversion of upper triangular R -## library(mgcv); n <- 5000;p<-4000;x <- matrix(runif(n*p),n,p) +## library(mgcv); n <- 500;p<-400;x <- matrix(runif(n*p),n,p) ## qrx <- mgcv:::pqr2(x,2);R <- qr.R(qrx) ## system.time(Ri <- mgcv:::pbsi(R,2)) ## system.time(Ri2 <- backsolve(R,diag(p)));range(Ri-Ri2) @@ -117,6 +189,20 @@ pchol <- function(A,nt=1,nb=30) { A } +pforwardsolve <- function(R,B,nt=1) { +## parallel forward solve via simple col splitting... + if (!is.matrix(B)) B <- as.matrix(B) + .Call(C_mgcv_Rpforwardsolve,R,B,nt) + +} + +pcrossprod <- function(A,trans=FALSE,nt=1,nb=30) { +## parallel cross prod A'A or AA' if trans==TRUE... + if (!is.matrix(A)) A <- as.matrix(A) + if (trans) A <- t(A) + .Call(C_mgcv_Rpcross,A,nt,nb) +} + pRRt <- function(R,nt=1) { ## parallel RR' for upper triangular R ## following creates index of lower triangular elements... diff --git a/R/smooth.r b/R/smooth.r index 6cb203a..0bb7573 100755 --- a/R/smooth.r +++ b/R/smooth.r @@ -508,7 +508,7 @@ tensor.prod.model.matrix1 <- function(X) { # X is a list of model matrices, from which a tensor product model matrix is to be produced. 
# e.g. ith row is basically X[[1]][i,]%x%X[[2]][i,]%x%X[[3]][i,], but this routine works # column-wise, for efficiency -# old version, which is rather slow becuase of using cbind. +# old version, which is rather slow because of using cbind. m <- length(X) X1 <- X[[m]] n <- nrow(X1) @@ -603,8 +603,8 @@ smooth.construct.tensor.smooth.spec <- function(object,data,knots) } XP <- list() if (object$np) # reparameterize - for (i in 1:m) - { if (object$margin[[i]]$dim==1) { + for (i in 1:m) { + if (object$margin[[i]]$dim==1) { # only do classes not already optimal (or otherwise excluded) if (!inherits(object$margin[[i]],c("cs.smooth","cr.smooth","cyclic.smooth","random.effect"))) { x <- get.var(object$margin[[i]]$term,data) @@ -625,7 +625,7 @@ smooth.construct.tensor.smooth.spec <- function(object,data,knots) warning("reparameterization unstable for margin: not done") } else { XP[[i]] <- sv$v%*%(t(sv$u)/sv$d) - Xm[[i]] <- Xm[[i]]%*%XP[[i]] + object$margin[[i]]$X <- Xm[[i]] <- Xm[[i]]%*%XP[[i]] Sm[[i]] <- t(XP[[i]])%*%Sm[[i]]%*%XP[[i]] } } else XP[[i]] <- NULL @@ -1971,7 +1971,9 @@ smooth.construct.re.smooth.spec <- function(object,data,knots) ## a simple random effects constructor method function ## basic idea is that s(x,f,z,...,bs="re") generates model matrix ## corresponding to ~ x:f:z: ... - 1. Corresponding coefficients -## have an identity penalty. +## have an identity penalty. If object$xt=="tensor" then terms +## depending on more than one variable are set up with a te +## smooth like structure (used e.g. in bam(...,discrete=TRUE)) { ## id's with factor variables are problematic - should terms have ## same levels, or just same number of levels, for example? @@ -1981,7 +1983,31 @@ smooth.construct.re.smooth.spec <- function(object,data,knots) form <- as.formula(paste("~",paste(object$term,collapse=":"),"-1")) object$X <- model.matrix(form,data) object$bs.dim <- ncol(object$X) - + + if (object$dim<2) object$xt <- NULL ## no point making it tensor like + + if (!is.null(object$xt)&&object$xt=="tensor") { + ## give object margins like a tensor product smooth... + object$margin <- list() + maxd <- maxi <- 0 + for (i in 1:object$dim) { + form1 <- as.formula(paste("~",object$term[i],"-1")) + object$margin[[i]] <- list(X=model.matrix(form1,data),term=object$term[i]) + d <- ncol(object$margin[[i]]$X) + if (d>maxd) {maxi <- i;maxd <- d} + } + ## now re-order so that largest margin is last... 
+ if (maxi0) { ## use sparse constraints for sparse terms if (sum(sm$X==0)>.1*sum(sm$X!=0)) { ## treat term as sparse if (sparse.cons==1) { xsd <- apply(sm$X,2,FUN=sd) @@ -2894,7 +2922,9 @@ smoothCon <- function(object,data,knots,absorb.cons=FALSE,scale.penalty=TRUE,n=n } else { ## it's not sparse anyway sm$C <- matrix(colSums(sm$X),1,ncol(sm$X)) } - } ## end of sparse constraint handling + } else { ## end of sparse constraint handling + sm$C <- matrix(colSums(sm$X),1,ncol(sm$X)) ## default dense case + } ## conSupplied <- FALSE alwaysCon <- FALSE } else { @@ -3235,6 +3265,7 @@ smoothCon <- function(object,data,knots,absorb.cons=FALSE,scale.penalty=TRUE,n=n if (p>rank) for (i in 1:length(sml)) { sml[[i]]$S[[2]] <- diag(c(rep(0,rank),rep(1,p-rank))) sml[[i]]$rank[2] <- p-rank + sml[[i]]$S.scale[2] <- 1 sml[[i]]$null.space.dim <- 0 } } @@ -3252,6 +3283,7 @@ smoothCon <- function(object,data,knots,absorb.cons=FALSE,scale.penalty=TRUE,n=n for (i in 1:length(sml)) { sml[[i]]$S[[M+1]] <- Sf sml[[i]]$rank[M+1] <- sum(ind) + sml[[i]]$S.scale[M+1] <- 1 sml[[i]]$null.space.dim <- 0 } } diff --git a/changeLog b/changeLog index 0c9db50..0d33909 100755 --- a/changeLog +++ b/changeLog @@ -1,6 +1,55 @@ ** denotes quite substantial/important changes *** denotes really big changes +1.8-7 + +** 'gam' default scale parameter changed to modified Pearson estimator + developed by Fletcher 2012 Biometrika 99(1), 230-237. See ?gam.scale. + +** 'bam' now has a 'discrete' argument to allow discretization of covariates + for more efficient computation, with substantially more parallelization + (via 'nthreads'). Still somewhat experimental. + +* Slightly more accurate smoothing parameter uncertainty correction. Changes + edf2 used for AIC (under RE/ML), and hence may change AIC values. + +* jagam prior variance on fixed effects is now set with reference to data and + model during initialization step. + +* bam could lose offset for small datasets in gaussian additive case. fixed. + +* gam.side now setup to include penalties in computations if fewer data than + coefs (an exceedingly specialist topic). + +* p-value computation for smooth terms modified to avoid an ambiguity in the + choice of test statistic that could lead to p-value changing somewhat between + platforms. + +* gamm now warns if attempt is made to use extended family. + +* step fail logic improved for "fREML" optimization in 'bam'. + +* fix of openMP error in mgcv_pbsi, which could cause a problem in + multi-threaded bam computation (failure to declare a variable as private). + +* Smoothing parameter uncertainty corrected AIC calculations had an + indexing problem in Sl.postproc, which could result in failure of bam with + linked smooths. + +* mroot patched for fact that chol(...,pivot=TRUE) does not operate as + documented on rank deficient matrices: trailing block of triangular factor + has to be zeroed for pivoted crossprod of factor to equal original matrix. + +* bam(...,sparse=TRUE) deprecated as no examples found where it is really + worthwhile (but let me know if this is a problem). + +* marginal model matrices in tensor product smooths now stored in + re-parameterized form, if re-parameterization happened (shouldn't change + anything!). + +* initial.spg could fail if response vector had dim attributes and extended + family used. fixed. 
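A minimal usage sketch of the new (still experimental) discretization option described above, with argument names as assumed from this version's bam documentation; gamSim() just supplies simulated test data:

   library(mgcv)
   dat <- gamSim(1, n = 20000, dist = "normal")   ## simulated data with x0..x3
   b <- bam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat,
            discrete = TRUE,   ## discretize covariates for faster fitting
            nthreads = 2)      ## and use 2 threads
   summary(b)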
+ 1.8-6 * Generalization of list formula handling to allow linear predictors to diff --git a/inst/CITATION b/inst/CITATION old mode 100755 new mode 100644 diff --git a/inst/po/po/LC_MESSAGES/R-mgcv.mo b/inst/po/po/LC_MESSAGES/R-mgcv.mo deleted file mode 100644 index 8a8c0209e7489909a17304291d3b7fe304150274..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23123 zcmchf3y@@2dEXD%LH1e*5Fp-gclGGmWv8{Xl8`jom9!5@2zI4qwGsjuw0FAi^xWz0 zds}_Gx3?P%mdsiPf*~Z>EK+O&hQy9ZY=i^CibG&0MXGQTT!~#IDLa%?aZ(PI2iT_U zl#}28JLlfpGrIze?R4qPzxzJU<9mPKIji5j^anrW_wOZu0{m@o7x-tO#=VL`G+qL32j2_63j7p!Blw%(bHIyO9ADjw z0xp7D*C9|Od>g3oe+tyP9|twR&x3owuYu}!+l!pL2iyf}KJN&>KMsnXe-f_mda)nx zZcxuBpy==R`4e9?cgf- zIq(4Zs+aluF(|sd6J$#6aqtH4GvI5$Z-83g6+EPM90EnZHSiMfJ>UX(7TgJb5!Cqq z9n|yBWfFXK*MaKyICvHKUT`<~X>bYrJy6d*kHHW%*8~~b-2&bYJ^#m%bZ}AqT}y?cY@yl`OjU?;LG5H;LE{}fSUhj zL5=rYp!D|-1HJ^|k^J1jpKHK}Kv?8H0p1U8fExdmF!kBs0T2`7?gn2Cu7Kj}PlNpD zKFOaOz^{Uu&o&+sefEOlM+?;RkAX+QCqPKzo`Fza4K~0Cd?ToSPJ^Q3uY%&omqA-M zsC9k=l>Jy>QJUWkAR^&z2bp-oe1Bz}R z1SOB30@cs&gPPAD1-zI^|1iHV2QL8^LEYa6>b-kF{&Vl<4C z@Y8(@)cb!7qLS`!V5~&9%R!BM0(=9w3W|=O14Xz03El->2J^J;1Uw8r3f>BS1$-^| z>}$PT-3Q*m@3UYIejS9x?hQ;pMwX%%VDbcyb31Z`#?nAeKY)iIg1qC9sxziuYxZF{{(D< z&qv8_0Z)Smz|VoO#{DU%{&(;9a+rhn@cRs?b$kufI2YgS>31Ed->cve@crOVfd3JE z3wV-2H2!aZTE~9|Uj+Wpt)71oc#PjM_zLjTpvL((p!)p*Ca33jgZh0Kgk`P|>bZ}D zklg(?D0*DN4mp@KfD6+pZh3(-U9YS9}U0%FyNJ3)cps+4tN&S zx}NcR)(Bn>iq4OMSAstez7*U5Pl5jlyaIe9lV?cxcJP4igA3qANBw&DfVzGR)H**1 z@}K)X{z&c@5lZ3hp!o10D0+Vk)V#k1?gnpRQF^}z>is7{@$({x@&LFGl%9PUycPTk zDE>T~&NS~`pvJik)Vkjq@WY_i_gml_!Eb{a@7QrqhaUs=`|IGP;3Wuu1a1dWeRmMl zI8(3*{tT%3ZG_*?KjH1kL6EN9qu>tkYhVWcpK!l>ub1Cn0bj}W-v?opd**$f&PPD; z<00^J@ZBJy<30sm41N=Q0jNbR0WSn!1Kt5%2EGH_1%3#W{CokFz4`<2R`5CZd;T8- zui^I))N`K%MX%okHO@tE^7DH^z-z!4a{V}{an?Z5GR{XFVOxGP4m`Aa;(p@X)mIEn)Y$pFVgh+hcwu1AIzBfZ~b^R&1j(?d=d|4 zC&qk-mG*gxzmL%VHmyzjHQJ|Wf0rg6>Vx@oKR|mAZ5!=x(ZpxiV;||DKGK0-ru`)C zGqm^8^jV>aU;6w#ns{4&ZU|p~2$T#yM0;zvCcre>r_bNtpusBlC{29R=cj1m(c5W= zo9Qj!-bQGDH5r%#9WbM_4%556IM|0VG6(+~^yS=viz@2BbW0h(;aM`$0W-AL2t zc^>TVkA?52z>m>nTR%km=d^dx?xJm{ZT-BD3%AogNZU!fnD#8%yJ@ed{RHhkT0|>9 z|2Ta4C*Tj$PKVz=4?ZV+pMrOU@1FzLY0@Lfe);*Ee34E62ec9GS7=wz9;WG&(=Mgm zN!vmDr?h`Ydj)O&BfFu`LE2B#zDS$VK1sWT_Omp7&U!F?e2niC+6mg#v?1+RX)mMc zvp_5Cn~z(?CuyicvloB<(1st9+cb4TA5ajzGR@~EF@d6VIq?qFP`MU-c~X|xt+?Oswu zONXPPpXZ~s!X1jU#wc1%vv#zifrn|l(o5zqGZ|)IjN;)a%{qQ=#d0*zJj%6CqayFy zTUupudNhogHe_Pu{Uj^WJhP#;-c3agdRZOj{V2|&ah7K3C}qSM%kit%T(-W~k4MAw z^k403gZ5E)@Lk88z|bx8oLl4Jv}g^JQ4*byh(tva4_j--`uJ3w_C(vrdKe_b)qL2G zvsMzNqhuJ5ppiSAiyLrt5>3{a4UBB(;e~h@_Y;=gig>(VKwTb)hl%xGq7ogR2G=iu7B+g7llKx;cMNHdC zuX?7JW)LhIAj@f(J&JJN<40_cX@+{`Qt7NHyDVNOY2SRqGBx{_u3I!9{1eQEAC;Sc69WLe03EjN5ui83^Gg0iX`dlA?rBF@^Qycis_mxLX6Rx7BVi9 z)iKkt*CjKy&{m$TuBNS&$%F;A<53)*3RS{3_40;&&3o$Y}g^5Pf7NfM2Ved2Q6n7n%a|7W=TR!3H_5yZcr@IScLYS!P80vQ7 zezQnM45 zDww;~an{lpJmqB_6BRt5na5zE=FAgX5wy*$kz7G$CdwU81_{h<*LGi4t2FwA0jm*0 zXTHDk9k`;t8E4)`{-S(IrG+av7Fjqn6Kd5rnd4| zW0H!bpCUS50M2{QJm9RC_PH}0PIt5J3{QmN&)ee`mP4k`2Poe8;hRRO9Na*p8Ksim z((|F3#)_u9XOs-0f|*C7Av85Qw8#8H*{~eMUC8Z)o?6y2HK;{k#rN&Z-sD zzIV~}-3wU$(qJGCqZ}8!CLbKEN78OOnWdT-Vad24%w!Z3E5<`Jkp!N4y_Zj_U@Ykq zV?_6)1b@vp=6U$1ByQ>20qn+}rQ(g1{qw@rwf?tZ3*&8|JOOhiBKq^<4S-J9}p z12O5KC??GKpe7%8a%I}6WVdqC1t!1lND^I%u#aX6XO0(z>Q+V?k0hUAdT1RG91)S2 z3WOkt*L=K(`SR`$8EEz5qKI~2%XYYvB6`C-sh4=~U^fR7CyPO2Zg&2`P2q+;QSsWv zrF&AS<|SIO`T1d&V)>+(C|)z2U^Z=>TaTcpqD=;E+D?3Fp-V|I$oUEvf^Xz%vWl0> zhdg6kjr*1)Kqpf}Or$(d8TgiRP!c4*;LWaSC=MosCdgu%jgk)J)l9UOl>`SpR7Rue zl?1Of+f2n~#<~?tRVs<`>d_Y!WG+`03!HG45Z&S|wm!T&>h5L|=B!v76u{x9{V}mX 
zUKFX4nw65mfs2}|`D|(3%5;Cz=EQLMggF!HF=)|RgZ_47n6&avmKJue51mW4lv!90 z(~c@hfiOc^2YVIdw2U@o*Cfk+0uG*qP!R%Fku#!GW^ox|5E*s>BavM{#U+J| zF8tau{qwJqc)F0Qc(0cB2r_XvA-6(2rqLcz5^}a$V|1>1fh_L)2g_$rGHISq4OYos zEhSgpj8z-=Vr38nx*Bkr`IW_fOW=*I@L_mr)2o{T^04HRFVKhX9xq8IrhX|k) zHo#zvh#5DtcZKT=DMQT5{P_ZJ^btX}hz=SR6j3Q7!GQvHWp+MbndHN+H--}5V%$fn zh{rIUlqWJv%+p?v#I_fm^a9hU{G}tSQ1GB^6j5l6mX$&g1hy>AwvKov+>{UnA9Mj4 z;?_!?lfA0~H91uJuv}-J*Mu}v+*J;b&EQ*tn37fdRgyoA3f{}$LkCMdfHL08Wh4O* z7_wM+hd=gD_40fWE!`vrQHChz%(FhMnty8V_w>{?-{C1%1$B_cNs@NfcuXbh5}2tByD!mEn6F~#H5A-a$w?UU%p?bf1R z<$J-z;OuUW_BiDdxvz`Xb7?xC^C*NEmdVW0Vwrh+&mL-m9wiO7`xBrVIv9;2s=0w3%Upoi5zC|NI+eA85sh*7wE@4-RMB!28a!aZdF6*H;p1M_{A0obTW|c6V zuwrGX45$tnUN$3Lzb#3;%^}TE>>j_2$YpJ!Ojt zxjLShbjC`pBCj`@b&-eXsHR?!_CDvZ0Jt*fFbZ}XSGT*2aw5`nAk8+54Gck|p5~^6 z&EMK0Y?zH&*Qx=%LSSCHCT?-mY6Z@GZ%>zebUuEn?Wj*d<#EtsQQGpQ9_4>?`xJz& z;#qpKE>Du=S`;rE9+E_yd`wc$JFvhPKtp_3#Vd;P{C)FMl#XRDis+=TMl_{bUNHpjdjc=^E6?!9kuW$xy^fo0=Fa!RX9>M0npyBB!YXi{CT2V27(`h;h5I^=@5tF~9i zP%y#5;W>4%$WIS>))g*9VP%wDEhNUrDSg~!Ay(62aLclOI`dw3OuS^f7Fv^!Q0X5rX364p*O*7&WwkOE@7h2BST~uXjNKQ zO)ix19vZf=FzmdnPz$5iEk!VkjH~67Nm9HaF2f+HkfzEil#{W}$F@sg&nLqn5$H07 zs%b?oSLv=#w1SzgFPlm`QjGa+oIp{zlP%GG&`g*>r41i!NwLoXnU5J``f55}g^pDw z?`!v&u#+_KX`Bkom# z<~c8^sd9+ak5^4oH$&)ojq?J@l~C=j=%Fyi2&u|?S;sZOo|}jGkE|A{o7Ce4mGE1b z)9tPdg3C%lFYQtu>k(*$=vci_z$9q)o1-muU!7!0F=;Fz(*~Y9ZN(RakAS6?+WImc zWYcBEITRUqK>e^pi<;f(c@fhl3ATFLbZpp#J#SLDvf@(}XL%a~YVv+!lCX!DEDZK9 z?VNjy+>4DpRGgX&=?uZYmK8^LkQ>J&e(dLsrgP6DT0u^uafb7!tP}!ZcgzE@ z-p3xYS@wRXbt+nFUcb0><2EYc7?s7NMJH`99vpXy#S{5*w8EELhx1j@ZO86Dv3Q6r z4(aORVKVsTXz!k->lXK1vv|#(XlZHr+G}}c@py8|%u4;p-o-sPEMBt|EnUC7wD;9} zmiFw~c7(0@#S@UQpca%5m!pk`$KAM?MTgmIV4H1U&we)B_Vd<`>a}Do{hN6cTNQVb z#S=-~|32PR4fw%hN46ck^XP3gL6(|(wjILspqh&(P-F}aoAXx>dIXN7X6#$yj`!bt z;`YTG>gP0l_7fLxBerd0LYJc(R?^Y7V?AwTEZ&ZwVYsq^{VevA;`y)p7qb2Pm!exJ zj&S?Rr9C&ZQF7H)_I2O>YxQ+Om-p;j+7rF~?RH(y?AggXOT4pp=eGM7kLAeCXz?D5 zWh*)4Cy3uD^2Hl&xbDWqrEPBR^hFx2OwTMiQDbiw}_)j)UYEM4S&#L zIwBoOag?)_G}(A6OE(^&Bz5j=eHU*xoK5?2xIy6|ny|BzZ#>pYNuFU>YnCPJ?ru8h z;`}xq8F%caJH&QRyxyn#b*%Yp8coifoozfqTzT$HG@;8*j8~$DHl5@fkI>N=7d1<| z@oYL_GnKO;dDN&=t47luBgh%WlNk41Xq@qS60Ilgsa61DJg209C^FqnJM`I}(QWMR z;(kA8d2uvjmn7NvJhU0)8f_NM`s~SO=gvI+Q}HC5Qo*a2ZGka^wbl@>VP4G6oh?b7 z(sMWMbvF$50kwfSaDk+msbYnJ4i~{Fxz@X=_~uE{8Tr2N!De z9zSg-{g}OGQT?yGgQ&szkOL$3HESwwTF!Tz2@DGqp(Y7~i`lrFvcwdlH!Tp~5yEAW zDuDw7NOs!no}4>7KzL@n+s!k{3)d~1ZmkDaPSThy0h#+~#wMIdKZ{xjAp4078sfR( z6!B#e6v;4&uc^(^EFZh!TMw!oET{74^m|^fk@0v}^N*)RO{YSJ95TSywzl zlrZ)_2+`l}8n5Ma6bC|5jPPJ6$qLJN<%9?bEH<0Y;?{JX`*y?x?-F#HSCRX^7Hrcw zcZO~3syl{l=YzN~F^M)FBc{|;CeZ-*gicw?Y*G`(=U%1G#4st!vmSJUFT6XG!aOA| zZ0)3Rq+Cle_|g;WQfAXX+@WsClM#4bV6C!3S}xnU4STd+T@V&6%DUIGQBar%A8T_m zwbMd)H)zg{t{pyUy2ETUv&reX3}rR1jYrxuJj}T>tt4vb5Q^y0&pXi1cRaz~wOC%s z092EO1oMG3wZwTW<5SU~Wb-KQrePF!I9NQU*f7JOm9ujwLpjZ$!MR86Xi7eeXW?v0 zKc?Gg$lRdoyvelFDToJe^FV1!>Dh+Y@szrka4^KUJ8j39LK*IIO;b&!HPb*^n@I|! zGV#{}W6rddYSF^dMB|O8rlUl*ypxBnO9qw%3girOuU>3gxNIooj(UR=B+OHp{k&x3 z5y_5%r!??s8v38iO#$9ylE4#qq*9m9k+b zc*dOBR};CQh=mG@G&d!i+A^ckZEw9ie|W~*;F3U^i06$wsX}%_8l9mr8{vs~-ZSMx z2ZwQYPF(0t-daR@AFuXiuvyP-!b}o&#M{nksMQau{s#7;G zS4+0^QipCfjj~yi@j#{T`2RH>qs?TCk}j~$NQZKQwtD96HeL0kh61e|V~3t}N%J@j zF)@98^U9QcZ(=mPeCv_$02 z{+gLDjg2?y+{W(8EWm$kz(h{k1jYMjmJ+jw((THbdc$pVQ{O&dm`&KTCJH{TDx23Z zrPBnVgj7mMa8m?_$s{{>7CVzoulA+1m?#$GsE)AtNWOeFf|s{*qB-UqjGn$5QQJA? 
zX{b=fg9IC!yFf;DzNbN&Hk)>b+Kk_=0HZzbLUg~$IUm3#8}Cw>g9js+Cn{7#0^t}I9I{Z>;mNK=)6*6{fqrMvc)|e_EpnKLSf7RAnx5_zBHMh%83&8-72|Av za3*U$II^qYE1r80{+X$~u%VQ=*~>GD zdl&OC!VND*Gb^5nt2sG>CuK!RJ08>#$&iiOOEE&m#ypTUM1j*ty zOM2SFL)i6V)|w?9#TrnGaExS^0CjB6cTPCfcSvs(rbUfS$BC%cO3$iLj;*#RkeR*u z_HZ`Fm?g_HWl)X4kkB$5E(#K@F0C$)^=;hWh{if?PD&?BExCZaxI7&+TG+Y0v0Xit zDo!TuLqnZ4^12oK&0>qqylx_$I;9bMQ{Z*J;>Ak1N-fk6M%Z|SC?uG7(EuG#AV$wK zRvh}qqSH6OeN=n4>KkY28omhWgEy0bF2(g}IE>UlNI8sD#w#@m>1{5EnvXg?L5SEW za9e-%OGc$SJguIXm%7B2(V~g4Y2?$y^~wth1eF7(z7#lRg~Z~KW=)1t(ZNJg5e*jF zS)7-*6-g39BaApkh&Ade$1MZf5T$VN3G3@hX^QEx%&DnOwZk5Leb7`$JLjdUoDRb{=Y`nRQ`J zS=T^!>{wLXGZ!b9)JrD{h8rtvq*nD=6?C%tiDE}81SnOoFBbMQ{Q>70sfEZAiC}YnFesWaX+$tC zqOMn((MnyTN{i%(?K#Ual{l87biwk>g^yt=o;EuULo7Kk%NFvTEd%RRbtOj`qxqDH zx|ee7y(PyRtHc;v6*b&sl*-j;?TyKKrW6)v>%g<1WKl);sS_J!9o36`7Nx?16{t`c z9+UAz9ZH=amnXJR;*dd<#>^{mNYN7=x>D$`Tz(y0#MGgK#778(pNkKDJjIoPNnuOP zkdH6(3>Lt_hB*Rl#=^ zBbD56T&7|sZho2iH#M?z;cgqZOP4bHYvLC#^))N5Pi(OaDo2HKI}Au*6dlMzYAf%(-Fz`)sTz}@RW&XCnuD(e z4%sVvws~3kX|5iE;@dy^ASg&yp^gq1<0zaBEjxWa1d0rYoMX&{`3r zIVA71Dz&Dvqu5IU%Z1cB)Mu@bN2i;`p8Yd_)=ZkAVp_R0i`o55MF_+ifs^l>>5U0z z)Rl;E3eCt(nUANcDq5*DQ7PxH;SZplXEvzy&wRGnCQ8=k9Lh<_yY5Yyj0$}h77Rso zlA7A+s3;Lq9x4WYC$8aYkBvt8-D4uKm3Y{maYBF?09lq7nn~;!oVvBI`9kF~4`HWk zIL67sbzhqF@o&s&TZ(e+ji;0|Pv#Sse*MOn<7;(StcFwo|Hv^m^(I0S+j;^@yMmp= z_b7X~(3>1>`##@fFXVf9(^qh*da@{`E0S~Go2w7tPqbjm?K%9`ApD=JmtteR{0#E` zm!4=ftFplSAvnB~Pf~(I4JOkUpMt~M`h<~P86f3Fs6)yoN4aZ-DoJ$`jvSj>G1INA zQwURhO1!z|fZSsw%OZ@#sq*e_E@Z9T%NExcf5)?OWyEdvM0B^eHJmS-Ta@J8hPd9< zPRTer7TckZn@q6%gL55mW-O1+KR;)2YB)eQ_p}$*;Ap8Gl@U(DqI+1WzyOZaku?qL z3*RN&nA;6<4Q%RPE-LHCUD8kjzDnQ24gzksDz}I8c%=0v!C^`Bp0zxfhniY-LLudM z*p0$RSp=qSF>e{!Ju9mol@Kr(g^v(DO@eWOla0}!v0Uw>h1o#QDg`Fy2%&AYsx6(e zb=+mqCUaHkKa4adEIvYw%X55xfdhVJ=vZ~GUN_ZYe4JiM-V6b;Af}W-wkL+U+q$B-sZMV5d3v^&k?^Jz0sm3S*I&bt!w@@^ zYbDp6x9&RE+%@?x`EC(iqt$A-@AJ)J!})9gp}{$Bt#Wzg(J@5Ci7-)4wX`>zC#lTd zWZFs{KBHF2b2eS?hZv;h8l{UVXA6)|S0+@phoq$3Z^>8NW##!r1^Nr<61l1L5lC_}#!5~UqRVe1U?7~%a zZPRO_bnK{GoM~^lqYiylW3EGGqhsdi|7QR?Pgn-{#t3w43nNqS{NM)tU1RwgyqCBP3$^%JosI(ecSS6VF^!nUzWMJ0DHA@fg`i zfdwlK5agSUc_s`IG9pstdye)qF3{BZ$_KU zoIA5^6He(&IfY63!J$kdT-(#tWEHg5B?Y`1wI294xA|ahARj1vtsN6A;yCD-S68Us zo^4Z$;NS0*TXHpP=7Lyi1pGf#n5%6pcsVe?8qYnn$rjcUK-pT=nbMHdJf%)pCD>4# zqHpam&2XBy<-QI;iP`uTYDcxJlfa5veYVTE!i8(V_AB?c>}7vjB-}lAF3nM2w|(OD%;2l zJ@9WkhV0o7Q~pf}RxpQMo2(6w)q!`k;7f8yD(61uLv@W@Du~V>n&h#+=o5PqVUt2? z9@Toa9Z{uV*HnAs5USpPt<9$*XxjxOJA}KW)vkU9FA&j`7{0L#Fjhbv&*3lW}~S T!k2ZOCiEMR;i1S!mG1um?LnLz diff --git a/inst/po/po/LC_MESSAGES/mgcv.mo b/inst/po/po/LC_MESSAGES/mgcv.mo deleted file mode 100644 index d44523296d77a2cf492fc62a893a86cbacb2d2b0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2576 zcmbVNOLJQ_6c$h(HRV-c2g4~$>z3rYcAC=EZTd>ml9E>)!?X(?^1ZU7_+IHo(v9mo z-Lyl84Le{_Is<=z1x&ZyFpb%;WM8(-z>*)pr)xXu8qLU`qTZz75FW%0{j#B1@Ptlj9mk2pwG{* zz#8x|5bhM?0DciBdu6y~CE%;0^|Qd&Fd<$HpM#e%;dU{64iR!O;M(qZX?MjKTww@% zI4+VCCZP>(sV#cKP^RQoCMeX~+(_>HDrrf@oz%*b+u(|9Dni+oBBF%flu4FQBsxY2 zOS(;MEFzt-i;8sS{7Fk^Dx#i|Fy=ILoRK$+8+90F#)#+;yXc%qQs=)*wJnT0dW@7- zf!)fu5hES8&}ghSAV55dLanYfE9^3tF&ruMq?0_BImAVxO`qD@j5I53aizI%rLow& zL2HfGOG^vO-i+H2ByKvc+}NvFS_ff;pi_k0ibr>c2o4JHJ;`E7Tw9x4TBm>-Ty+tB zTZU4ouwTgNNW^Sis!kSjL(9W(Q*Q0p&tZIQT^mR3!hhD}rXzN}a9Wd1cpMk@LY?;6 z4V{tCgfo3u_VI=rBJ7U+mdc?7()Fn)-G)!RcS?ED#>JH@3v(YX%zacTtR$Ir$o{sV zspMQ#B#9#vh%SwNy=@vxB`Pf89T}1lTbVS7-X>amsSc&w@I;_($U1JEKicMT<}=i$ zC9+ux3nCgCm@Jaq3Ac}DEx4~!Cli#&OpYQiBGJC&9YMV9txi{~gkCYjma8zk9TnZ+ z(27IB$e$VbGY(&2If00*NTiRvLWetS&FD=Lx?nL1J`u*Ef9sUM6RK5C1+~+qhDcF? 
zVA*zL6kN(WHfZWPmDX09!JHAr?ZG@UQm3hE?PO3r5mcwJtWQr*R52hJG{m;_hWF&A zg6d3gqDHk-_1e@#wFbE*Ze7qcTv_B-n>r0{XI*Y{n&(Q2n9jzJoz7bb(Hk;wWZSX;_|{?4r-NZX-=bI6x`~kh|l2>97pw~I)k?2#tJw4 zar1I8vnS`>ZwnJF;AuoCd7Wlj(v{ZY%f)#v2asTp5Un#hY-qnECvrp zyTL=!T_XnfW36C=cWob!5X)GGd5fdnb*d!Uo^I)=&sI>^ox7N@IS5eg+?TsR%=+@8(gXS;P@CS-Eo$cjBpso1Xl1kMDeiCz{|q#9N4cAVWtgY)pOYYmdIUy0SX>er^$V zL3uu1J9k4|8_U6ch(Do(%WV4S!KT_h6i_OUsIa`pg%FH%>(K*4XqFCS63>Jg!p}vQ z=Ui#hR<~l=L*OH)DAG^lprAxV0C#*kkkFpg(Kt*tuR{^z^RDuy0F~vfzUP6#9X?6C z1p@4MQ9dN0oOtFe57VDPHV3yE-ivaHuS3Q}dG+8vw&NsGOXTA&fFGR7d;D2u%bArh zgARg9L7zUsp8LDL;nKmKJ}4Lr$~RS3VMr1>!XR=S4X>nl^nqO3-Sqq2yB$`GpB{t;G0(Ny6Wi^UW%APcby++sPu0d8B=_P?oo( y?{*)4(Ko5yBU`jjxB-_7wk^7GPae8no{fv2" msgstr "skalierte t df müssen >2 sein" @@ -155,41 +153,41 @@ msgid "" "link not available for zero inflated; available link for `lambda' is only " "\"loga\"" msgstr "" -"Link nicht verfügbar für Nullinflation; einziger verfügbarer Link für\n" +"Link nicht verfügbar für Null-Inflation; einziger verfügbarer Link für " "'lambda' ist \"loga\"" msgid "negative values not allowed for the zero inflated Poisson family" msgstr "" -"negative Werte nicht zulässig für die null-inflationierte Poissonfamilie" +"negative Werte nicht zulässig für die null-inflationierte Poisson-Familie" msgid "Non-integer response variables are not allowed with ziP" msgstr "" -"Nicht-ganzzahlige Antwortvariablen nicht zulässig bie\n" -"null-inflationierter Poisson" +"Nicht-ganzzahlige Antwortvariablen nicht zulässig bei " +"null-inflationierter Poisson-Verteilung" msgid "Using ziP for binary data makes no sense" msgstr "" -"Für binäre Daten macht Gebrauch null-inflationierter Poisson keinen Sinn" +"Für binäre Daten macht Gebrauch null-inflationierter Poisson-Verteilung keinen Sinn" msgid "fast REML optimizer reached iteration limit" -msgstr "schnller REML Optimierer erreichte max. Iterationszahl" +msgstr "schneller REML Optimierer erreichte max. Iterationszahl" msgid "unsupported order of differentiation requested of gam.fit3" -msgstr "nicht unterstützte Ordnung der Differentiation für gam.fit3 gefordert" +msgstr "nicht unterstützte Ordnung der Ableitung für gam.fit3 gefordert" msgid "illegal `family' argument" -msgstr "unerlaubtes `family'-Argument" +msgstr "unerlaubtes 'family'-Argument" # http://de.wikipedia.org/wiki/Prädiktor msgid "Invalid linear predictor values in empty model" -msgstr "Ungültige lineare Prognosewerte in leerem Modell" +msgstr "Ungültige Werte des linearen Prädiktors in leerem Modell" msgid "Invalid fitted means in empty model" -msgstr "Ungültig angepasste Mittel in leerem Modell" +msgstr "Ungültige angepasste Mittelwerte in leerem Modell" msgid "Length of start should equal %d and correspond to initial coefs for %s" msgstr "" -"Länge von start sollte gleich %d sein und mit den initialen\n" +"Länge von start sollte gleich %d sein und mit den initialen " "Koeffizienten für %s korrespondieren" msgid "Can't find valid starting values: please specify some" @@ -245,15 +243,15 @@ msgid "" "Non finite derivatives. Try decreasing fit tolerance! See `epsilon' in `gam." "contol'" msgstr "" -"Unendliche Ableitungen. Versuchen Sie die Anpassungstoleranz zu\n" +"Unendliche Ableitungen. Versuchen Sie die Anpassungstoleranz zu " "reduzieren! Siehe 'epsilon' in 'gam.control'" msgid "" "Non-finite derivatives. Try decreasing fit tolerance! See `epsilon' in `gam." "contol'" msgstr "" -"Unendliche Ableitungen. Versuchen Sie die Anpassungstoleranz zu\n" -"reduzieren! Siehe 'epsilon' in 'gam.control'<" +"Unendliche Ableitungen. 
Versuchen Sie die Anpassungstoleranz zu " +"reduzieren! Siehe 'epsilon' in 'gam.control'" msgid "Algorithm did not converge" msgstr "Algorithmus konvergierte nicht" @@ -302,7 +300,7 @@ msgid "" "%s link not available for negative binomial family; available links are " "\"identity\", \"log\" and \"sqrt\"" msgstr "" -"%s Link nicht verfügbar für die negativ binomial Familie; verfügare\n" +"%s Link nicht verfügbar für die negativ-binomial-Familie; verfügare " " Links sind \"identity\", \"log\" und \"sqrt\"" msgid "H has wrong dimension" @@ -321,7 +319,7 @@ msgid "p must be in [1,2]" msgstr "p muss aus [1,2] sein" msgid "y must be strictly positive for a Gamma density" -msgstr "y muss für die Gammadichte streng positiv sein" +msgstr "y muss für die Gamma-Dichte streng positiv sein" msgid "y must be an integer multiple of phi for Tweedie(p=1)" msgstr "y muss für Tweedie(p=1) ein ganzzahliges Vielfaches von phi sein" @@ -339,14 +337,14 @@ msgid "scale parameter must be positive" msgstr "Skalenparameter muss positiv sein" msgid "emtpy models not available" -msgstr "leere Modelle nicht verfügabar" +msgstr "leere Modelle nicht verfügbar" # R/mgcv.r + R/gam.fit3.r msgid "Length of start should equal" msgstr "Länge von start sollte gleich sein" msgid "and correspond to initial coefs for" -msgstr "und entsprechend den Anfangs-coefs für" +msgstr "und entsprechend den Anfangs-Koeffizienten für" msgid "Non-finite coefficients at iteration" msgstr "Nicht-endliche Koeffizienten bei der Iteration" @@ -453,7 +451,7 @@ msgid "" "gamm can not handle linked smoothing parameters (probably from use of `id' " "or adaptive smooths)" msgstr "" -"gamm kann mit gelinkten Glättungsparametern nicht umgehen (vermutlich\n" +"gamm kann mit gelinkten Glättungsparametern nicht umgehen (vermutlich " "aus der Nutzung von 'id' oder adaptiven Glättern)" msgid "only one level of smooth nesting is supported by gamm" @@ -516,7 +514,7 @@ msgid "smoothing parameter prior choise not recognised, reset to gamma" msgstr "Glättungsparameterwahl nicht erkannt, falle zurück auf gamma" msgid "coefficient simulation data is missing" -msgstr "Koeffizientensimulationsdaten fehlen" +msgstr "Koeffizienten-Simulationsdaten fehlen" msgid "burnin too large, reset" msgstr "Vorlauf zu groß, zurückgesetzt" @@ -540,13 +538,13 @@ msgid "silly tolerance supplied" msgstr "unangemessene Toleranz angegeben" msgid "argument k must be positive." 
-msgstr "Arguement k muss positiv sein" +msgstr "Argument k muss positiv sein" msgid "A not square" msgstr "A ist nicht quadratisch" msgid "Can not have more eigenvalues than nrow(A)" -msgstr "Es kann max nrow(A) Eigenwerte geben" +msgstr "Es kann maximal nrow(A) Eigenwerte geben" msgid "nrow(M$X) != length(M$y)" msgstr "nrow(M$X) != length(M$y)" @@ -570,7 +568,7 @@ msgid "initial point very close to some inequality constraints" msgstr "Anfangsparameter sehr nah an einigen Ungleichungsnebenbedingungen" msgid "initial parameters very close to inequality constraints" -msgstr "Anfangsparameter sehr nah zu den Ungleichungsnebenbedingungen" +msgstr "Anfangsparameter sehr nah an den Ungleichungsnebenbedingungen" msgid "ncol(M$C) != length(M$p)" msgstr "ncol(M$C) != length(M$p)" @@ -585,13 +583,13 @@ msgid "M$S[%d] is too large given M$off[%d]" msgstr "M$[%d] ist zu groß zum gegeben M$off[%d]" msgid "Penalized model matrix must have no more columns than rows" -msgstr "Die penalized model matrix darf nicht mehr Spalten als Zeilen haben" +msgstr "Die penalisierte Modellmatrix darf nicht mehr Spalten als Zeilen haben" msgid "Model matrix not full column rank" msgstr "Modellmatrix hat keinen vollen Spaltenrang" msgid "model has repeated 1-d smooths of same variable." -msgstr "Modell hat 1-d-Glättungen der gleichen Variable wiederholt." +msgstr "Modell hat 1-d-Glättungen derselben Variable wiederholt." msgid "`id' linked smooths must have same number of arguments" msgstr "'id' gelinkte Glätter müssen die selbe Anzahl von Argumenten haben" @@ -630,7 +628,7 @@ msgid "" "Later terms sharing an `id' can not have more smoothing parameters than the " "first such term" msgstr "" -"Später auftretende Terme, die eine 'id' teilen, können nicht mehr\n" +"Später auftretende Terme, die eine 'id' teilen, können nicht mehr " "Glättungsparameter haben als der erste solche Term" msgid "Supplied smoothing parameter vector is too short - ignored." @@ -655,14 +653,14 @@ msgstr "Elemente von min.sp dürfen nicht negativ sein." msgid "" "`negbin' with unknown theta and outer iteration is deprecated - use `nb'." msgstr "" -"'negbin' mit unbekanntem theta und äußeren Iterationen ist veraltet -\n" +"'negbin' mit unbekanntem theta und äußeren Iterationen ist veraltet - " "bitte 'nb' nutzen" msgid "unknown outer optimization method." 
msgstr "Unbekannte äußere Optimierungsmethode" msgid "nlm.fd not available with negative binomial Theta estimation" -msgstr "nlm.fd nicht verfügbar bei der nagativ binomialen Theta Schätzung" +msgstr "nlm.fd nicht verfügbar bei der negativ-binomialen Theta-Schätzung" msgid "nlm.fd only available for GCV/UBRE" msgstr "nlm.fd nur verfügbar für GCV/UBRE" @@ -671,26 +669,26 @@ msgid "" "only outer methods `newton' & `bfgs' supports `negbin' family and theta " "selection: reset" msgstr "" -"nur die äußere Methoden 'newton' & 'bfgs' unterstützen\n" +"nur die äußere Methoden 'newton' & 'bfgs' unterstützen " "'negbin'-Familie und theta-Auswahl: Wird zurückgesetzt" msgid "sorry, general families currently ignore offsets" -msgstr "sorry, allgemeine Familien ignorieren aktuelle Verschiebungen" +msgstr "sorry, allgemeine Familien ignorieren momentan Offsets" msgid "unknown optimizer" msgstr "unbekannter Optimierer" msgid "unknown smoothness selection criterion" -msgstr "unbekanntes Glattheitswahl Kriterium" +msgstr "unbekanntes Glattheitswahl-Kriterium" msgid "Reset optimizer to outer/newton" msgstr "Optimierer auf outer/newton zurückgesetzt" msgid "in.out incorrect: see documentation" -msgstr "in.out falsch: bitte in der Doku nachsehen" +msgstr "in.out falsch: bitte in der Dokumentation nachsehen" msgid "incorrect number of linear predictors for family" -msgstr "falsche Anzahl linearer Prediktoren for diese Familie" +msgstr "falsche Anzahl linearer Prädiktoren für diese Familie" msgid "nthreads must be a positive integer" msgstr "nthreads muss eine positive, ganze Zahl sein" @@ -723,7 +721,7 @@ msgstr "Y muss univariat sein, falls nicht binomisch" msgid "Length of start should equal %d and correspond to initial coefs." msgstr "" "Länge von start sollte %d sein und mit den initialen Koeffizienten " -"Korrespondieren" +"korrespondieren" msgid "" "iterative weights or data non-finite in gam.fit - regularization may help. " @@ -736,17 +734,17 @@ msgid "Step size truncated: out of bounds." msgstr "Schrittgröße verkleinert: Außerhalb der Begrenzungen." msgid "`object' is not of class \"gam\"" -msgstr "'object' ist nicht aus der Klasse »gam«" +msgstr "'object' ist nicht aus der Klasse \"gam\"" msgid "Smoothness uncertainty corrected covariance not available" -msgstr "Glattheitsunsicherheit korrigierte Kovarianz ist nicht verfügbar" +msgstr "Glattheitsunsicherheits-korrigierte Kovarianz ist nicht verfügbar" msgid "Unknown type, reset to terms." -msgstr "Unbekannte Typ, wird auf terms zurückgesetzt." +msgstr "Unbekannter Typ, wird auf terms zurückgesetzt." msgid "predict.gam can only be used to predict from gam objects" msgstr "" -"predict.gam kann nur benutzt werden, um von gam-Objekten vorauszuberechnen" +"predict.gam kann nur benutzt werden, um auf Basis von gam-Objekten vorherzusagen" msgid "newdata is a model.frame: it should contain all required variables" msgstr "" @@ -756,7 +754,7 @@ msgid "not all required variables have been supplied in newdata!" msgstr "nicht alle benötigten Variablen wurden in newdata angegeben!" 
msgid "type iterms not available for multiple predictor cases" -msgstr "Typ iterms ist für den Fall multipler Prediktoren nicht verfügbar" +msgstr "Typ iterms ist für den Fall multipler Prädiktoren nicht verfügbar" msgid "non-existent terms requested - ignoring" msgstr "nicht existierende Terme angefordert - wird ignoriert" @@ -771,8 +769,8 @@ msgid "" "Pearson residuals not available for this family - returning deviance " "residuals" msgstr "" -"Pearsonresiduen für diese Familie nicht verfügbar - geben\n" -"Devianzresiduen zurück" +"Pearson-Residuen für diese Familie nicht verfügbar - geben " +"Devianz-Residuen zurück" msgid "lambda and h should have the same length!" msgstr "lambda und h sollten die selbe Länge haben!" @@ -796,7 +794,7 @@ msgid "" "p-values for any terms that can be penalized to zero will be unreliable: " "refit model to fix this." msgstr "" -"Die p-Werte für einen Term der auf Null bestraft werden kann sind\n" +"Die p-Werte für einen Term, der auf Null bestraft werden kann, sind " "unzuverlässig: Modell wird neu angepasst, um dies zu korrigieren." msgid "p.type!=0 is deprecated, and liable to be removed in future" @@ -860,7 +858,7 @@ msgid "" "Partial residuals do not have a natural x-axis location for linear " "functional terms" msgstr "" -"Partielle Residuen haben keine natürliche x-Achsen Lage für lineare\n" +"Partielle Residuen haben keine natürliche x-Achsen Lage für lineare " "funktionale Ausdrücke" msgid "no automatic plotting for smooths of more than two variables" @@ -899,14 +897,14 @@ msgid "" "Don't know what to do with parametric terms that are not simple numeric or " "factor variables" msgstr "" -"Weiß nicht anzufangen mit parametrischen Ausdrücken, die weder einfach\n" +"Weiß nichts anzufangen mit parametrischen Ausdrücken, die weder einfach " "numerisch noch Faktorvariablen sind" msgid "View variables must contain more than one value. view = c(%s,%s)." msgstr "View-Variablen müssen mehr als einen Wert enthalten. view = c(%s,%s)" msgid "type must be \"link\" or \"response\"" -msgstr "Typ muss »link« oder »response« sein" +msgstr "Typ muss 'link' oder 'response' sein" msgid "Something wrong with zlim" msgstr "Etwas stimmt nicht mit zlim" @@ -974,7 +972,7 @@ msgstr "ord ist falsch, wird auf NULL zurückgesetzt" msgid "ord contains out of range orders (which will be ignored)" msgstr "" -"ord enthält Ordungen außerhalb des Wertebereichs(, die ignoriert werden)" +"ord enthält Ordungen außerhalb des Wertebereichs (die ignoriert werden)" msgid "by=. not allowed" msgstr "by=. nicht erlaubt" @@ -1001,7 +999,7 @@ msgid "" "single penalty tensor product smooths are deprecated and likely to be " "removed soon" msgstr "" -"Tensorprodukt-Glätter mit einfachem Strafterm sind veraltet und werden\n" +"Tensorprodukt-Glätter mit einfachem Strafterm sind veraltet und werden " "wahrscheinlich bald entfernt" msgid "fx length wrong from t2 term: ignored" @@ -1014,7 +1012,7 @@ msgid "d can not be negative in call to null.space.dimension()." msgstr "d kann im Aufruf von null.space.dimension() nicht negativ sein." 
msgid "arguments of smooth not same dimension" -msgstr "Argumente der Glättung haben nicht die gleiche Dimension" +msgstr "Argumente der Glättung haben nicht dieselbe Dimension" msgid "components of knots relating to a single smooth must be of same length" msgstr "" @@ -1031,13 +1029,13 @@ msgid "no data to predict at" msgstr "keine Daten zum Vorausberechnen von" msgid "Basis only handles 1D smooths" -msgstr "Basis arbeitet nur mit 1-d-Glättungen" +msgstr "Basis arbeitet nur mit 1D-Glättungen" msgid "number of supplied knots != k for a cr smooth" msgstr "Anzahl der angegebenen Knoten != k für eine cr-Glättung" msgid "F is missing from cr smooth - refit model with current mgcv" -msgstr "F fehlt im cr Glätter - Modell wird mit aktuellem mgcv neu angepasst" +msgstr "F fehlt im cr-Glätter - Modell wird mit aktuellem mgcv neu angepasst" msgid "more knots than unique data values is not allowed" msgstr "mehr Knoten als einheitliche Datenwerte sind nicht erlaubt" @@ -1064,7 +1062,7 @@ msgid "" "knot range is so wide that there is *no* information about some basis " "coefficients" msgstr "" -"Knotenbereich ist so weit, dass er *keine* Information über einige\n" +"Knotenbereich ist so weit, dass er *keine* Information über einige " "Basiskoeffizienten enthält. " msgid "penalty order too high for basis dimension" @@ -1075,7 +1073,7 @@ msgstr "" "Basisdimension ist größer als die Zahl der unterschiedlichen Kovariaten" msgid "fs smooths can only have one factor argument" -msgstr "fs Glätter können nur ein Faktorargument haben" +msgstr "fs-Glätter können nur ein Faktorargument haben" msgid "\"fs\" smooth cannot use a multiply penalized basis (wrong basis in xt)" msgstr "" @@ -1086,7 +1084,7 @@ msgid "\"fs\" terms can not be fixed here" msgstr "\"fs\" Ausdrücke können nicht hier festgelegt werden" msgid "the adaptive smooth class is limited to 1 or 2 covariates." -msgstr "Die adaptive Glätterklasse ist beschränkt auf 1 oder 2 Kovariate." +msgstr "Die adaptive Glätterklasse ist beschränkt auf 1 oder 2 Kovariaten." msgid "penalty basis too large for smoothing basis" msgstr "Straftermbasis ist zu groß für die Glättungsbasis" @@ -1102,21 +1100,21 @@ msgstr "MRF Basisdimension ist zu hoch gesetzt" msgid "data contain regions that are not contained in the knot specification" msgstr "" -"Daten enthalten Gebiete, die nicht in der Knotenspezifikation\n" +"Daten enthalten Gebiete, die nicht in der Knotenspezifikation " "enthalten sind" msgid "" "penalty matrix, boundary polygons and/or neighbours list must be supplied in " "xt" msgstr "" -"Straftermmatrix, Grenzpolygone und/oder die Nachbarliste muss in xt\n" +"Straftermmatrix, Grenzpolygone und/oder die Nachbarliste muss in xt " "angegeben werden" msgid "no spatial information provided!" msgstr "keine räumliche Information angegeben!" 
msgid "mismatch between nb/polys supplied area names and data area names" -msgstr "area names aus nb/poly und Daten passen nicht zusammen" +msgstr "area names aus nb/poly und Daten passen nicht zusammen" msgid "Something wrong with auto- penalty construction" msgstr "Etwas stimmt nicht mit der automatischen Straftermkonstruktion" @@ -1143,7 +1141,7 @@ msgid "" "A term has fewer unique covariate combinations than specified maximum " "degrees of freedom" msgstr "" -"Ein Ausdruck hat weniger eindeutige Kombinationen von Kovariaten als\n" +"Ein Ausdruck hat weniger eindeutige Kombinationen von Kovariaten als " "die angegebene maximale Zahl von Freiheitsgraden" msgid "s value reduced" @@ -1171,7 +1169,7 @@ msgid "" "handling `by' variables in smooth constructors may not work with the " "summation convention" msgstr "" -"die Handhabung von 'by' Variablen in der Glättungskonstruktion\n" +"die Handhabung von 'by' Variablen in der Glättungskonstruktion " "funktioniert evtl. nicht mit der Summationskonvention" msgid "Can't find by variable" @@ -1179,11 +1177,11 @@ msgstr "Kann nicht über Variable gefunden werden" msgid "factor `by' variables can not be used with matrix arguments." msgstr "" -"Faktor-»by«-Variablen können nicht mit Matrixargumenten benutzt werden." +"Faktor-'by'-Variablen können nicht mit Matrixargumenten benutzt werden." msgid "`by' variable must be same dimension as smooth arguments" msgstr "" -"»by«-Variable muss die gleiche Dimension wie die Glättungsargumente haben" +"'by'-Variable muss die gleiche Dimension wie die Glättungsargumente haben" msgid "Number of prediction and fit constraints must match" msgstr "" @@ -1223,7 +1221,7 @@ msgid "can't soap smooth without a boundary" msgstr "soap Glätter braucht Grenze" msgid "bnd must be a list of boundary loops" -msgstr "bnd muss eine Liste von Grenz-Schleifen" +msgstr "bnd muss eine Liste von Grenz-Schleifen sein" msgid "faulty bnd" msgstr "fehlerhaftes bnd" @@ -1235,7 +1233,7 @@ msgid "data outside soap boundary" msgstr "Daten außerhalb der soap Grenze" msgid "no free coefs in sf smooth" -msgstr "keine freien Koeffizienten in sf Glättung" +msgstr "keine freien Koeffizienten in sf-Glättung" msgid "only deals with 2D case" msgstr "behandelt nur den 2D Fall" diff --git a/po/R-po.po b/po/R-po.po deleted file mode 100755 index 725692f..0000000 --- a/po/R-po.po +++ /dev/null @@ -1,2099 +0,0 @@ -msgid "" -msgstr "" -"Project-Id-Version: mgcv 1.7-19\n" -"Report-Msgid-Bugs-To: bugs@r-project.org\n" -"POT-Creation-Date: 2015-03-30 11:44\n" -"PO-Revision-Date: 2012-08-31 17:12+0100\n" -"Last-Translator: Łukasz Daniel \n" -"Language-Team: Łukasz Daniel \n" -"Language: pl_PL\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 " -"|| n%100>=20) ? 
1 : 2)\n" -"X-Poedit-SourceCharset: iso-8859-1\n" - -# Recommended/mgcv/R/mgcv.r: 3151 -# stop("argument is not a gam object") -#, fuzzy -msgid "'family' argument seems not to be a valid family object" -msgstr "argument nie jest obiektem klasy \"gam\"" - -# Recommended/mgcv/R/mgcv.r: 1861 -# stop("Can't find valid starting values: please specify some") -# Recommended/mgcv/R/gam.fit3.r: 255 -# stop("Can't find valid starting values: please specify some") -#, fuzzy -msgid "cannot find valid starting values: please specify some" -msgstr "" -"Nie można znaleźć poprawnych wartości startowych: proszę określić kilka" - -msgid "Deviance = %s Iterations - %d" -msgstr "" - -msgid "Non-finite deviance" -msgstr "" - -# Recommended/mgcv/R/mgcv.r: 1934 -# warning(paste("Non-finite coefficients at iteration",iter)) -# Recommended/mgcv/R/gam.fit3.r: 324 -# warning("Non-finite coefficients at iteration ", -# iter) -#, fuzzy -msgid "non-finite coefficients at iteration %d" -msgstr "Nieskończone współczynniki w iteracji" - -# Recommended/mgcv/R/mgcv.r: 1999 -# warning("Algorithm did not converge") -# Recommended/mgcv/R/gam.fit3.r: 652 -# warning("Algorithm did not converge") -#, fuzzy -msgid "algorithm did not converge" -msgstr "Algorytm nie uzbieżnił się" - -# Recommended/mgcv/R/bam.r: 445 -# warning("fitted probabilities numerically 0 or 1 occurred") -# Recommended/mgcv/R/bam.r: 611 -# warning("fitted probabilities numerically 0 or 1 occurred") -# Recommended/mgcv/R/mgcv.r: 2006 -# warning("fitted probabilities numerically 0 or 1 occurred") -# Recommended/mgcv/R/gam.fit3.r: 658 -# warning("fitted probabilities numerically 0 or 1 occurred") -msgid "fitted probabilities numerically 0 or 1 occurred" -msgstr "" -"dopasowane prawdopodobieństwa okazały się być numerycznie równe 0 lub 1" - -# Recommended/mgcv/R/bam.r: 449 -# warning("fitted rates numerically 0 occurred") -# Recommended/mgcv/R/bam.r: 615 -# warning("fitted rates numerically 0 occurred") -# Recommended/mgcv/R/mgcv.r: 2010 -# warning("fitted rates numerically 0 occurred") -# Recommended/mgcv/R/gam.fit3.r: 662 -# warning("fitted rates numerically 0 occurred") -msgid "fitted rates numerically 0 occurred" -msgstr "dopasowane wskaźniki numerycznie okazały się być równe 0" - -# Recommended/mgcv/R/mgcv.r: 1934 -# warning(paste("Non-finite coefficients at iteration",iter)) -# Recommended/mgcv/R/gam.fit3.r: 324 -# warning("Non-finite coefficients at iteration ", -# iter) -#, fuzzy -msgid "non-finite coefficients at iteration" -msgstr "Nieskończone współczynniki w iteracji" - -# Recommended/mgcv/R/bam.r: 1048 -# stop("family not recognized") -# Recommended/mgcv/R/mgcv.r: 1546 -# stop("family not recognized") -# Recommended/mgcv/R/gamm.r: 1416 -# stop("family not recognized") -msgid "family not recognized" -msgstr "'family' nie został rozpoznany" - -# Recommended/mgcv/R/mgcv.r: 1259 -# stop("unknown smoothness selection criterion") -#, fuzzy -msgid "un-supported smoothness selection method" -msgstr "nieznane kryterium wyboru wygładzania" - -msgid "min.sp not supported with fast REML computation, and ignored." -msgstr "" - -msgid "sparse=TRUE not supported with fast REML, reset to REML." 
-msgstr "" - -# Recommended/mgcv/R/bam.r: 1080 -# stop("Not enough (non-NA) data to do anything meaningful") -# Recommended/mgcv/R/mgcv.r: 1522 -# stop("Not enough (non-NA) data to do anything meaningful") -# Recommended/mgcv/R/gamm.r: 1397 -# stop("Not enough (non-NA) data to do anything meaningful") -msgid "Not enough (non-NA) data to do anything meaningful" -msgstr "" -"Brak wystarczającej (nie NA) liczby danych, aby wykonać cokolwiek sensownego" - -msgid "AR.start must be logical" -msgstr "" - -# Recommended/mgcv/R/bam.r: 1123 -# stop("Model has more coefficients than data") -# Recommended/mgcv/R/mgcv.r: 1561 -# stop("Model has more coefficients than data") -msgid "Model has more coefficients than data" -msgstr "Model posiada więcej współczynników niż danych" - -msgid "chunk.size < number of coefficients. Reset to %d" -msgstr "" - -msgid "model matrix too dense for any possible benefit from sparse" -msgstr "" - -msgid "AR1 parameter rho unused with sparse fitting" -msgstr "" - -msgid "AR1 parameter rho unused with generalized model" -msgstr "" - -msgid "samfrac too small - ignored" -msgstr "" - -msgid "Model can not be updated" -msgstr "" - -# Recommended/mgcv/R/gam.fit3.r: 2061 -# stop(linktemp, " link not available for negative binomial family; available links are \"identity\", \"log\" and \"sqrt\"") -#, fuzzy -msgid "link not available for coxph family; available link is \"identity\"" -msgstr "" -"połączenie nie jest dostępne dla rodziny rozkładu Pascala; dostępne " -"połączenia to \"identity\", \"log\" oraz \"sqrt\"" - -msgid "NA times supplied for cox.ph prediction" -msgstr "" - -# Recommended/mgcv/R/gam.fit3.r: 2061 -# stop(linktemp, " link not available for negative binomial family; available links are \"identity\", \"log\" and \"sqrt\"") -#, fuzzy -msgid "" -"link not available for ordered categorical family; available links are " -"\"identity\"" -msgstr "" -"połączenie nie jest dostępne dla rodziny rozkładu Pascala; dostępne " -"połączenia to \"identity\", \"log\" oraz \"sqrt\"" - -msgid "Must supply theta or R to ocat" -msgstr "" - -# Recommended/mgcv/R/smooth.r: 177 -# stop("x out of range") -#, fuzzy -msgid "values out of range" -msgstr "'x' jest poza zakresem" - -# Recommended/mgcv/R/gam.fit3.r: 2061 -# stop(linktemp, " link not available for negative binomial family; available links are \"identity\", \"log\" and \"sqrt\"") -msgid "" -"link not available for negative binomial family; available links are " -"\"identity\", \"log\" and \"sqrt\"" -msgstr "" -"połączenie nie jest dostępne dla rodziny rozkładu Pascala; dostępne " -"połączenia to \"identity\", \"log\" oraz \"sqrt\"" - -# Recommended/mgcv/R/gam.fit3.r: 2094 -# stop("negative values not allowed for the negative binomial family") -msgid "negative values not allowed for the negative binomial family" -msgstr "ujemne wartości nie są dozwolone dla rozkładu z rodziny Pascala" - -# Recommended/mgcv/R/gam.fit3.r: 2266 -# stop(gettextf("link \"%s\" not available for poisson family.", -# linktemp, collapse = ""),domain = NA) -#, fuzzy -msgid "link \"%s\" not available for Tweedie family." -msgstr "połączenie \"%s\" nie jest dostępne dla rozkładów z rodziny Poissona." 
- -msgid "Tweedie p must be in interval (a,b)" -msgstr "" - -# Recommended/mgcv/R/gam.fit3.r: 2061 -# stop(linktemp, " link not available for negative binomial family; available links are \"identity\", \"log\" and \"sqrt\"") -#, fuzzy -msgid "" -"link not available for beta regression; available links are \"logit\", " -"\"probit\", \"cloglog\" and \"cauchit\"" -msgstr "" -"połączenie nie jest dostępne dla rodziny rozkładu Pascala; dostępne " -"połączenia to \"identity\", \"log\" oraz \"sqrt\"" - -msgid "saturated likelihood may be inaccurate" -msgstr "" - -# Recommended/mgcv/R/gam.fit3.r: 2061 -# stop(linktemp, " link not available for negative binomial family; available links are \"identity\", \"log\" and \"sqrt\"") -#, fuzzy -msgid "" -"link not available for scaled t distribution; available links are \"identity" -"\", \"log\", and \"inverse\"" -msgstr "" -"połączenie nie jest dostępne dla rodziny rozkładu Pascala; dostępne " -"połączenia to \"identity\", \"log\" oraz \"sqrt\"" - -# Recommended/mgcv/R/mgcv.r: 1657 -# stop("value of epsilon must be > 0") -#, fuzzy -msgid "scaled t df must be >2" -msgstr "wartość 'epsilon' musi być > 0" - -# Recommended/mgcv/R/gam.fit3.r: 2094 -# stop("negative values not allowed for the negative binomial family") -#, fuzzy -msgid "NA values not allowed for the scaled t family" -msgstr "ujemne wartości nie są dozwolone dla rozkładu z rodziny Pascala" - -# Recommended/mgcv/R/gam.fit3.r: 2061 -# stop(linktemp, " link not available for negative binomial family; available links are \"identity\", \"log\" and \"sqrt\"") -#, fuzzy -msgid "" -"link not available for zero inflated; available link for `lambda' is only " -"\"loga\"" -msgstr "" -"połączenie nie jest dostępne dla rodziny rozkładu Pascala; dostępne " -"połączenia to \"identity\", \"log\" oraz \"sqrt\"" - -# Recommended/mgcv/R/gam.fit3.r: 2094 -# stop("negative values not allowed for the negative binomial family") -#, fuzzy -msgid "negative values not allowed for the zero inflated Poisson family" -msgstr "ujemne wartości nie są dozwolone dla rozkładu z rodziny Pascala" - -msgid "Non-integer response variables are not allowed with ziP" -msgstr "" - -msgid "Using ziP for binary data makes no sense" -msgstr "" - -msgid "fast REML optimizer reached iteration limit" -msgstr "" - -# Recommended/mgcv/R/gam.fit3.r: 117 -# stop("unsupported order of differentiation requested of gam.fit3") -msgid "unsupported order of differentiation requested of gam.fit3" -msgstr "niewspierany porządek różniczkowania zażądany od 'gam.fit3'" - -# Recommended/mgcv/R/mgcv.r: 1827 -# stop("illegal `family' argument") -# Recommended/mgcv/R/gam.fit3.r: 189 -# stop("illegal `family' argument") -msgid "illegal `family' argument" -msgstr "niepoprawny argument 'family'" - -# Recommended/mgcv/R/gam.fit3.r: 218 -# stop("Invalid linear predictor values in empty model") -msgid "Invalid linear predictor values in empty model" -msgstr "Niepoprawne wartości liniowej zmiennej niezależnej w pustym modelu" - -# Recommended/mgcv/R/gam.fit3.r: 221 -# stop("Invalid fitted means in empty model") -msgid "Invalid fitted means in empty model" -msgstr "Niepoprawnie dopasowane średnie w pustym modelu" - -# Recommended/mgcv/R/gam.fit3.r: 243 -# stop("Length of start should equal ", nvars, -# " and correspond to initial coefs for ", deparse(xnames)) -#, fuzzy -msgid "Length of start should equal %d and correspond to initial coefs for %s" -msgstr "oraz odpowiadać początkowym współczynnikom dla" - -# Recommended/mgcv/R/mgcv.r: 1861 -# stop("Can't find valid 
starting values: please specify some") -# Recommended/mgcv/R/gam.fit3.r: 255 -# stop("Can't find valid starting values: please specify some") -msgid "Can't find valid starting values: please specify some" -msgstr "" -"Nie można znaleźć poprawnych wartości startowych: proszę określić kilka" - -# Recommended/mgcv/R/mgcv.r: 1876 -# stop("NAs in V(mu)") -# Recommended/mgcv/R/gam.fit3.r: 272 -# stop("NAs in V(mu)") -# Recommended/mgcv/R/gam.fit3.r: 447 -# stop("NAs in V(mu)") -msgid "NAs in V(mu)" -msgstr "wartości NA w 'V(mu)'" - -# Recommended/mgcv/R/mgcv.r: 1878 -# stop("0s in V(mu)") -# Recommended/mgcv/R/gam.fit3.r: 274 -# stop("0s in V(mu)") -# Recommended/mgcv/R/gam.fit3.r: 448 -# stop("0s in V(mu)") -msgid "0s in V(mu)" -msgstr "zera w 'V(mu)'" - -# Recommended/mgcv/R/mgcv.r: 1881 -# stop("NAs in d(mu)/d(eta)") -# Recommended/mgcv/R/gam.fit3.r: 277 -# stop("NAs in d(mu)/d(eta)") -# Recommended/mgcv/R/gam.fit3.r: 451 -# stop("NAs in d(mu)/d(eta)") -msgid "NAs in d(mu)/d(eta)" -msgstr "wartości NA w 'd(mu)/d(eta)'" - -# Recommended/mgcv/R/mgcv.r: 1885 -# warning(paste("No observations informative at iteration", -# iter)) -# Recommended/mgcv/R/gam.fit3.r: 283 -# warning("No observations informative at iteration ", iter) -#, fuzzy -msgid "No observations informative at iteration %d" -msgstr "Brak informacyjnych obserwacji w iteracji" - -# Recommended/mgcv/R/gam.fit3.r: 302 -# stop("Not enough informative observations.") -msgid "Not enough informative observations." -msgstr "Zbyt mało informacyjnych obserwacji." - -# Recommended/mgcv/R/mgcv.r: 1934 -# warning(paste("Non-finite coefficients at iteration",iter)) -# Recommended/mgcv/R/gam.fit3.r: 324 -# warning("Non-finite coefficients at iteration ", -# iter) -#, fuzzy -msgid "Non-finite coefficients at iteration %d" -msgstr "Nieskończone współczynniki w iteracji" - -# Recommended/mgcv/R/mgcv.r: 1949 -# stop("no valid set of coefficients has been found:please supply starting values", -# call. = FALSE) -# Recommended/mgcv/R/gam.fit3.r: 340 -# stop("no valid set of coefficients has been found:please supply starting values", -# call. = FALSE) -msgid "" -"no valid set of coefficients has been found:please supply starting values" -msgstr "" -"nie znaleziono poprawnego zestawu współczynników: proszę dostarczyć wartości " -"startowe" - -# Recommended/mgcv/R/mgcv.r: 1951 -# warning("Step size truncated due to divergence",call.=FALSE) -# Recommended/mgcv/R/gam.fit3.r: 346 -# warning("Step size truncated due to divergence", -# call. = FALSE) -msgid "Step size truncated due to divergence" -msgstr "Rozmiar kroku przycięty z uwagi na rozbieżność" - -# Recommended/mgcv/R/mgcv.r: 1955 -# stop("inner loop 1; can't correct step size") -# Recommended/mgcv/R/gam.fit3.r: 351 -# stop("inner loop 1; can't correct step size") -msgid "inner loop 1; can't correct step size" -msgstr "wewnętrzna pętla 1; nie można poprawić rozmiaru kroku" - -# Recommended/mgcv/R/gam.fit3.r: 363 -# warning("Step size truncated: out of bounds", -# call. 
= FALSE) -msgid "Step size truncated: out of bounds" -msgstr "Rozmiar kroku przycięty: poza granicami" - -# Recommended/mgcv/R/mgcv.r: 1972 -# stop("inner loop 2; can't correct step size") -# Recommended/mgcv/R/gam.fit3.r: 368 -# stop("inner loop 2; can't correct step size") -msgid "inner loop 2; can't correct step size" -msgstr "wewnętrzna pętla 2; nie można poprawić rozmiaru kroku" - -msgid "penalized deviance = %s" -msgstr "" - -# Recommended/mgcv/R/gam.fit3.r: 397 -# stop("inner loop 3; can't correct step size") -msgid "inner loop 3; can't correct step size" -msgstr "wewnętrzna pętla 3; nie można poprawić rozmiaru kroku" - -msgid "Step halved: new penalized deviance = %g" -msgstr "" - -# Recommended/mgcv/R/gam.fit3.r: 556 -# stop("Non finite derivatives. Try decreasing fit tolerance! See `epsilon' in `gam.contol'") -msgid "" -"Non finite derivatives. Try decreasing fit tolerance! See `epsilon' in `gam." -"contol'" -msgstr "" -"Nieskończone pochodne. Spróbuj zmniejszyć tolerancję dopasowania! Zobacz " -"'epsilon' w 'gam.contol'" - -# Recommended/mgcv/R/gam.fit3.r: 617 -# stop( -# "Non-finite derivatives. Try decreasing fit tolerance! See `epsilon' in `gam.contol'") -# Recommended/mgcv/R/gam.fit3.r: 633 -# stop( -# "Non-finite derivatives. Try decreasing fit tolerance! See `epsilon' in `gam.contol'") -msgid "" -"Non-finite derivatives. Try decreasing fit tolerance! See `epsilon' in `gam." -"contol'" -msgstr "" -"Nieskończone pochodne. Spróbuj zmniejszyć tolerancję dopasowania! Zobacz " -"'epsilon' w 'gam.contol'" - -# Recommended/mgcv/R/mgcv.r: 1999 -# warning("Algorithm did not converge") -# Recommended/mgcv/R/gam.fit3.r: 652 -# warning("Algorithm did not converge") -msgid "Algorithm did not converge" -msgstr "Algorytm nie uzbieżnił się" - -# Recommended/mgcv/R/mgcv.r: 2002 -# warning("Algorithm stopped at boundary value") -# Recommended/mgcv/R/gam.fit3.r: 654 -# warning("Algorithm stopped at boundary value") -msgid "Algorithm stopped at boundary value" -msgstr "Algorytm zatrzymał się na wartości granicznej" - -msgid "Pearson scale estimate maybe unstable. See ?gam.scale." -msgstr "" - -# Recommended/mgcv/R/gam.fit3.r: 766 -# stop("deriv should be 1 or 2") -msgid "deriv should be 1 or 2" -msgstr "'deriv' powinien wynosić 1 lub 2" - -# Recommended/mgcv/R/mgcv.r: 3534 -# stop("L must be a matrix.") -# Recommended/mgcv/R/gam.fit3.r: 937 -# stop("L must be a matrix.") -# Recommended/mgcv/R/gam.fit3.r: 979 -# stop("L must be a matrix.") -# Recommended/mgcv/R/gam.fit3.r: 1256 -# stop("L must be a matrix.") -# Recommended/mgcv/R/gam.fit3.r: 1499 -# stop("L must be a matrix.") -msgid "L must be a matrix." -msgstr "'L' musi być macierzą." - -# Recommended/mgcv/R/mgcv.r: 3535 -# stop("L must have at least as many rows as columns.") -# Recommended/mgcv/R/gam.fit3.r: 938 -# stop("L must have at least as many rows as columns.") -# Recommended/mgcv/R/gam.fit3.r: 980 -# stop("L must have at least as many rows as columns.") -# Recommended/mgcv/R/gam.fit3.r: 1257 -# stop("L must have at least as many rows as columns.") -# Recommended/mgcv/R/gam.fit3.r: 1500 -# stop("L must have at least as many rows as columns.") -msgid "L must have at least as many rows as columns." -msgstr "'L' musi mieć co najmniej tyle wierszy co kolumn." 
- -# Recommended/mgcv/R/mgcv.r: 3536 -# stop("L has inconsistent dimensions.") -# Recommended/mgcv/R/gam.fit3.r: 939 -# stop("L has inconsistent dimensions.") -# Recommended/mgcv/R/gam.fit3.r: 981 -# stop("L has inconsistent dimensions.") -# Recommended/mgcv/R/gam.fit3.r: 1258 -# stop("L has inconsistent dimensions.") -# Recommended/mgcv/R/gam.fit3.r: 1501 -# stop("L has inconsistent dimensions.") -msgid "L has inconsistent dimensions." -msgstr "'L' ma niespójne wymiary." - -msgid "link not implemented for extended families" -msgstr "" - -# Recommended/mgcv/R/gam.fit3.r: 1790 -# stop("fam not a family object") -# Recommended/mgcv/R/gam.fit3.r: 1912 -# stop("fam not a family object") -# Recommended/mgcv/R/gam.fit3.r: 1974 -# stop("fam not a family object") -# Recommended/mgcv/R/plots.r: 26 -# stop("fam not a family object") -# Recommended/mgcv/R/plots.r: 53 -# stop("fam not a family object") -msgid "fam not a family object" -msgstr "'fam' nie jest obiektem rodziny" - -# Recommended/mgcv/R/gam.fit3.r: 1812 -# stop("unrecognized (vector?) link") -msgid "unrecognized (vector?) link" -msgstr "nierozpoznane (wektorowe?) połączenie" - -# Recommended/mgcv/R/gam.fit3.r: 1904 -# stop("link not recognised") -msgid "link not recognised" -msgstr "połączenie nie zostało rozpoznane" - -# Recommended/mgcv/R/gam.fit3.r: 1944 -# stop("variance function not recognized for quasi") -msgid "variance function not recognized for quasi" -msgstr "funkcja wariancji nie została rozpoznana dla kwazi" - -# Recommended/mgcv/R/gam.fit3.r: 1967 -# stop("family not recognised") -# Recommended/mgcv/R/gam.fit3.r: 2036 -# stop("family not recognised") -msgid "family not recognised" -msgstr "rodzina nie została rozpoznana" - -# Recommended/mgcv/man/negbin.Rd: 27 -# stop("'theta' must be specified") -# Recommended/mgcv/R/gam.fit3.r: 2040 -# stop("'theta' must be specified") -msgid "'theta' must be specified" -msgstr "'theta' musi być określone" - -# Recommended/mgcv/R/gam.fit3.r: 2061 -# stop(linktemp, " link not available for negative binomial family; available links are \"identity\", \"log\" and \"sqrt\"") -#, fuzzy -msgid "" -"%s link not available for negative binomial family; available links are " -"\"identity\", \"log\" and \"sqrt\"" -msgstr "" -"połączenie nie jest dostępne dla rodziny rozkładu Pascala; dostępne " -"połączenia to \"identity\", \"log\" oraz \"sqrt\"" - -# Recommended/mgcv/R/gam.fit3.r: 2124 -# stop("H has wrong dimension") -# Recommended/mgcv/R/gam.fit3.r: 2146 -# stop("H has wrong dimension") -msgid "H has wrong dimension" -msgstr "'H' ma niepoprawny wymiar" - -# Recommended/mgcv/R/gam.fit3.r: 2184 -# stop("only scalar `p' and `phi' allowed.") -#, fuzzy -msgid "only scalar `rho' and `theta' allowed." -msgstr "tylko skalarne `p' oraz `phi' są dozwolone." - -msgid "1 0") -msgid "value of epsilon must be > 0" -msgstr "wartość 'epsilon' musi być > 0" - -# Recommended/mgcv/R/mgcv.r: 1659 -# stop("maximum number of iterations must be > 0") -msgid "maximum number of iterations must be > 0" -msgstr "maksymalna liczba iteracji musi być > 0" - -# Recommended/mgcv/R/mgcv.r: 1662 -# warning("silly value supplied for rank.tol: reset to square root of machine precision.") -msgid "" -"silly value supplied for rank.tol: reset to square root of machine precision." 
-msgstr "" -"śmieszna wartość została dostarczona do 'rank.tol': ustawianie pierwiastka " -"kwadratowego z precyzji maszyny" - -# Recommended/mgcv/R/mgcv.r: 1790 -# stop("Model seems to contain no terms") -msgid "Model seems to contain no terms" -msgstr "Model wydaje się nie zawierać żadnych członów" - -# Recommended/mgcv/R/mgcv.r: 1800 -# warning("Discrete Theta search not available with performance iteration") -msgid "Discrete Theta search not available with performance iteration" -msgstr "Poszukiwania dyskretnej thety nie są dostępne z wykonywaną iteracją" - -# Recommended/mgcv/R/mgcv.r: 1842 -# stop("y must be univariate unless binomial") -msgid "y must be univariate unless binomial" -msgstr "'y' musi zawierać jedną zmienną jeśli nie zawiera dwóch" - -# Recommended/mgcv/R/mgcv.r: 1850 -# stop(paste("Length of start should equal", nvars, -# "and correspond to initial coefs.")) -#, fuzzy -msgid "Length of start should equal %d and correspond to initial coefs." -msgstr "oraz odpowiadać początkowym współczynnikom." - -# Recommended/mgcv/R/mgcv.r: 1912 -# stop("iterative weights or data non-finite in gam.fit - regularization may help. See ?gam.control.") -msgid "" -"iterative weights or data non-finite in gam.fit - regularization may help. " -"See ?gam.control." -msgstr "" -"iteracyjne wagi lub nieskończone dane w 'gam.fit' - regularyzacja może " -"pomóc. Zobacz '?gam.control'." - -# Recommended/mgcv/R/mgcv.r: 1968 -# warning("Step size truncated: out of bounds.",call.=FALSE) -msgid "Step size truncated: out of bounds." -msgstr "Rozmiar kroku został przycięty: poza zakresem." - -# Recommended/mgcv/R/mgcv.r: 2052 -# stop("`object' is not of class \"gam\"") -msgid "`object' is not of class \"gam\"" -msgstr "'object' nie jest klasy \"gam\"" - -msgid "Smoothness uncertainty corrected covariance not available" -msgstr "" - -# Recommended/mgcv/R/mgcv.r: 2095 -# warning("Unknown type, reset to terms.") -msgid "Unknown type, reset to terms." -msgstr "Nieznany typ, resetowanie do 'terms'." - -# Recommended/mgcv/R/mgcv.r: 2098 -# stop("predict.gam can only be used to predict from gam objects") -msgid "predict.gam can only be used to predict from gam objects" -msgstr "" -"'predict.gam' może zostać użyty jedynie do przewidywania z obiektów \"gam\"" - -# Recommended/mgcv/R/mgcv.r: 2122 -# stop( -# "newdata is a model.frame: it should contain all required variables\n -msgid "newdata is a model.frame: it should contain all required variables" -msgstr "" -"\"newdata\" jest klasy \"model.frame\": powinien zawierać wszystkie wymagane " -"zmienne" - -# Recommended/mgcv/R/mgcv.r: 2135 -# warning("not all required variables have been supplied in newdata!\n -msgid "not all required variables have been supplied in newdata!" -msgstr "nie wszystkie wymagane zmienne zostały dostarczone w \"newdata\"!" - -msgid "type iterms not available for multiple predictor cases" -msgstr "" - -# Recommended/mgcv/R/mgcv.r: 2278 -# warning("non-existent terms requested - ignoring") -msgid "non-existent terms requested - ignoring" -msgstr "zażądano nieistniejących członów - ignorowanie" - -# Recommended/mgcv/R/mgcv.r: 2052 -# stop("`object' is not of class \"gam\"") -#, fuzzy -msgid "requires an object of class gam" -msgstr "'object' nie jest klasy \"gam\"" - -msgid "nothing to do for this model" -msgstr "" - -msgid "" -"Pearson residuals not available for this family - returning deviance " -"residuals" -msgstr "" - -msgid "lambda and h should have the same length!" 
-msgstr "" - -msgid "recov works with fitted gam objects only" -msgstr "" - -msgid "m can't be in re" -msgstr "" - -msgid "p-values may give low power in some circumstances" -msgstr "" - -msgid "p-values un-reliable" -msgstr "" - -msgid "p-values may give very low power" -msgstr "" - -msgid "" -"p-values for any terms that can be penalized to zero will be unreliable: " -"refit model to fix this." -msgstr "" - -msgid "p.type!=0 is deprecated, and liable to be removed in future" -msgstr "" - -# Recommended/mgcv/R/mgcv.r: 3071 -# warning("The following arguments to anova.glm(..) are invalid and dropped: ", -# paste(deparse(dotargs[named]), collapse = ", ")) -msgid "The following arguments to anova.glm(..) are invalid and dropped:" -msgstr "" -"Następujące argumenty przekazywane do 'anova.glm(..)' są niepoprawne i " -"zostały odrzucone:" - -# Recommended/mgcv/R/mgcv.r: 3071 -# warning("The following arguments to anova.glm(..) are invalid and dropped: ", -# paste(deparse(dotargs[named]), collapse = ", ")) -msgid "," -msgstr "," - -# Recommended/mgcv/R/mgcv.r: 3080 -# warning("test argument ignored") -msgid "test argument ignored" -msgstr "argument 'test' został zignorowany" - -# Recommended/mgcv/R/mgcv.r: 3081 -# stop("anova.gam called with non gam object") -msgid "anova.gam called with non gam object" -msgstr "'anova.gam' wywołana z obiektem innym niż klasy \"gam\"" - -# Recommended/mgcv/R/gam.fit3.r: 1790 -# stop("fam not a family object") -# Recommended/mgcv/R/gam.fit3.r: 1912 -# stop("fam not a family object") -# Recommended/mgcv/R/gam.fit3.r: 1974 -# stop("fam not a family object") -# Recommended/mgcv/R/plots.r: 26 -# stop("fam not a family object") -# Recommended/mgcv/R/plots.r: 53 -# stop("fam not a family object") -#, fuzzy -msgid "not a gam object" -msgstr "'fam' nie jest obiektem rodziny" - -# Recommended/mgcv/R/mgcv.r: 3151 -# stop("argument is not a gam object") -msgid "argument is not a gam object" -msgstr "argument nie jest obiektem klasy \"gam\"" - -# Recommended/mgcv/R/mgcv.r: 3331 -# stop("Supplied matrix not symmetric") -msgid "Supplied matrix not symmetric" -msgstr "Dostarczona macierz nie jest symetryczna" - -# Recommended/mgcv/R/mgcv.r: 3335 -# stop("singular values not returned in order") -msgid "singular values not returned in order" -msgstr "osobliwe wartości nie zostały zwrócone w w sposób uporządkowany" - -# Recommended/mgcv/R/mgcv.r: 3341 -# stop("Something wrong - matrix probably not +ve semi definite") -msgid "Something wrong - matrix probably not +ve semi definite" -msgstr "Coś nie tak - prawdopodobnie macierz nie jest dodatnio określona" - -# Recommended/mgcv/R/mgcv.r: 3356 -# stop("method not recognised.") -msgid "method not recognised." -msgstr "metoda nie została rozpoznana." - -# Recommended/mgcv/R/mgcv.r: 3473 -# stop(paste("S[[",i,"]] matrix is not +ve definite.",sep="")) -#, fuzzy -msgid "S[[%d]] matrix is not +ve definite." -msgstr "]] nie jest dodatnio określona." - -# Recommended/mgcv/R/mgcv.r: 3580 -# stop("dimensions of supplied w wrong.") -msgid "dimensions of supplied w wrong." -msgstr "wymiary dostarczonego 'w' są niepoprawne." - -# Recommended/mgcv/R/mgcv.r: 3584 -# stop("w different length from y!") -msgid "w different length from y!" -msgstr "'w' posiada długość inną niż 'y'!" - -# Recommended/mgcv/R/mgcv.r: 3591 -# stop("X lost dimensions in magic!!") -msgid "X lost dimensions in magic!!" -msgstr "'X' utraciło wymiary w 'magic()!'!" 
- -# Recommended/mgcv/R/smooth.r: 270 -# warning("dimension of fx is wrong") -#, fuzzy -msgid "mu dimensions wrong" -msgstr "wymiar 'fx' jest niepoprawny" - -msgid "a has wrong number of rows" -msgstr "" - -msgid "mvn requires 2 or more dimensional data" -msgstr "" - -msgid "mvn dimension error" -msgstr "" - -# Recommended/mgcv/R/mgcv.r: 2052 -# stop("`object' is not of class \"gam\"") -#, fuzzy -msgid "object is not a glm or gam" -msgstr "'object' nie jest klasy \"gam\"" - -msgid "names of z and pc must match" -msgstr "" - -# Recommended/mgcv/R/plots.r: 860 -# warning("Partial residuals do not have a natural x-axis location for linear functional terms") -msgid "" -"Partial residuals do not have a natural x-axis location for linear " -"functional terms" -msgstr "" -"Częsciowe reszty nie posiadają naturalnego położenia osi 'x' dla liniowych " -"członów funkcyjnych" - -# Recommended/mgcv/R/plots.r: 894 -# warning("no automatic plotting for smooths of more than two variables") -msgid "no automatic plotting for smooths of more than two variables" -msgstr "" -"brak automatycznego rysowania dla wygładzeń o więcej niż dwóch zmiennych" - -# Recommended/mgcv/R/plots.r: 937 -# warning("no automatic plotting for smooths of more than one variable") -msgid "no automatic plotting for smooths of more than one variable" -msgstr "" -"brak automatycznego rysowania dla wygładzeń o więcej niż jednej zmiennej" - -# Recommended/mgcv/R/plots.r: 990 -# warning("residuals argument to plot.gam is wrong length: ignored") -msgid "residuals argument to plot.gam is wrong length: ignored" -msgstr "" -"argument reszt przekazywany do 'plot.gam' ma niepoprawną długość: zignorowano" - -# Recommended/mgcv/R/plots.r: 1016 -# warning("No variance estimates available") -msgid "No variance estimates available" -msgstr "Brak dostępnego oszacowania wariancji" - -# Recommended/mgcv/R/plots.r: 1078 -# stop("No terms to plot - nothing for plot.gam() to do.") -msgid "No terms to plot - nothing for plot.gam() to do." -msgstr "Brak członów do rysowania - nic do wykonania przez 'plot.gam()'." 
- -# Recommended/mgcv/R/mgcv.r: 3312 -# stop("grid vectors are different lengths") -# Recommended/mgcv/R/plots.r: 1198 -# stop("grid vectors are different lengths") -msgid "grid vectors are different lengths" -msgstr "wektory siatki są różnej długości" - -# Recommended/mgcv/R/mgcv.r: 3313 -# stop("data vectors are of different lengths") -# Recommended/mgcv/R/plots.r: 1199 -# stop("data vectors are of different lengths") -msgid "data vectors are of different lengths" -msgstr "wektory danych są różnej długości" - -# Recommended/mgcv/R/mgcv.r: 3314 -# stop("supplied dist negative") -# Recommended/mgcv/R/plots.r: 1200 -# stop("supplied dist negative") -msgid "supplied dist negative" -msgstr "dostarczona odległość jest ujemna" - -# Recommended/mgcv/R/plots.r: 1256 -# stop("Model does not seem to have enough terms to do anything useful") -msgid "Model does not seem to have enough terms to do anything useful" -msgstr "" -"Model nie wydaje się mieć wystarczającej liczby członów aby zrobić coś " -"użytecznego" - -# Recommended/mgcv/R/plots.r: 1258 -# stop( -# paste(c("view variables must be one of",v.names),collapse=", ")) -#, fuzzy -msgid "view variables must be one of %s" -msgstr "zmienne podglądu muszą jednym z" - -# Recommended/mgcv/R/plots.r: 1262 -# stop("Don't know what to do with parametric terms that are not simple numeric or factor variables") -msgid "" -"Don't know what to do with parametric terms that are not simple numeric or " -"factor variables" -msgstr "" -"Nie wiadomo co zrobić z członami parametrycznymi, które nie są zmiennymi o " -"prostych liczbach lub czynnikami" - -# Recommended/mgcv/R/plots.r: 1272 -# stop(paste("View variables must contain more than one value. view = c(",view[1],",",view[2],").",sep="")) -#, fuzzy -msgid "View variables must contain more than one value. view = c(%s,%s)." -msgstr "zmienne 'view' muszą zawierać więcej niż jedną wartość. view =c(" - -# Recommended/mgcv/R/plots.r: 1320 -# stop("type must be \"link\" or \"response\"") -msgid "type must be \"link\" or \"response\"" -msgstr "'type' musi mieć wartość \"link\" lub \"response\"" - -# Recommended/mgcv/R/plots.r: 1347 -# stop("Something wrong with zlim") -# Recommended/mgcv/R/plots.r: 1407 -# stop("Something wrong with zlim") -msgid "Something wrong with zlim" -msgstr "Coś nie tak z 'zlim'" - -# Recommended/mgcv/R/plots.r: 1363 -# stop("color scheme not recognised") -msgid "color scheme not recognised" -msgstr "nie rozpoznano schematu kolorów" - -# Recommended/mgcv/R/plots.r: 1416 -# warning("sorry no option for contouring with errors: try plot.gam") -msgid "sorry no option for contouring with errors: try plot.gam" -msgstr "przykro mi, brak opcji rysowania konturu z błędami: spróbuj 'plot.gam'" - -# Recommended/mgcv/R/smooth.r: 142 -# stop("At least three knots required in call to mono.con.") -msgid "At least three knots required in call to mono.con." -msgstr "" -"Co najmniej trzy węzły są wymagane w wywołaniu przekazywanym do 'mono.con()'." 
- -# Recommended/mgcv/R/smooth.r: 145 -# stop("lower bound >= upper bound in call to mono.con()") -msgid "lower bound >= upper bound in call to mono.con()" -msgstr "dolny zakres >= górny zakres w wywołaniu przekazywanym do 'mono.con()'" - -# Recommended/mgcv/R/smooth.r: 157 -# stop("x is null") -msgid "x is null" -msgstr "'x' ma wartość NULL" - -# Recommended/mgcv/R/smooth.r: 158 -# stop("x has no row attribute") -msgid "x has no row attribute" -msgstr "'x' nie posiada atrybutu 'row'" - -# Recommended/mgcv/R/smooth.r: 159 -# stop("x has no col attribute") -msgid "x has no col attribute" -msgstr "'x' nie posiada atrybutu 'col'" - -# Recommended/mgcv/R/smooth.r: 173 -# stop("order too low") -msgid "order too low" -msgstr "zbyt mały 'order'" - -# Recommended/mgcv/R/smooth.r: 174 -# stop("too few knots") -# Recommended/mgcv/R/smooth.r: 1306 -# stop("too few knots") -msgid "too few knots" -msgstr "zbyt mało węzłów" - -# Recommended/mgcv/R/smooth.r: 177 -# stop("x out of range") -msgid "x out of range" -msgstr "'x' jest poza zakresem" - -# Recommended/mgcv/R/smooth.r: 251 -# warning("something wrong with argument d.") -# Recommended/mgcv/R/smooth.r: 359 -# warning("something wrong with argument d.") -msgid "something wrong with argument d." -msgstr "coś nie tak z argumentem 'd'." - -# Recommended/mgcv/R/smooth.r: 260 -# warning("one or more supplied k too small - reset to default") -# Recommended/mgcv/R/smooth.r: 368 -# warning("one or more supplied k too small - reset to default") -msgid "one or more supplied k too small - reset to default" -msgstr "" -"jeden lub więcej dostarczonych 'k' jest zbyt mały - przyjmowanie wartości " -"domyślnej" - -# Recommended/mgcv/R/smooth.r: 270 -# warning("dimension of fx is wrong") -msgid "dimension of fx is wrong" -msgstr "wymiar 'fx' jest niepoprawny" - -# Recommended/mgcv/R/smooth.r: 278 -# stop("xt argument is faulty.") -# Recommended/mgcv/R/smooth.r: 380 -# stop("xt argument is faulty.") -msgid "xt argument is faulty." -msgstr "argument 'xt' jest błędny." - -# Recommended/mgcv/R/smooth.r: 282 -# warning("bs wrong length and ignored.") -# Recommended/mgcv/R/smooth.r: 384 -# warning("bs wrong length and ignored.") -msgid "bs wrong length and ignored." -msgstr "'bs' posiada niepoprawną długość przez co został zignorowany." - -# Recommended/mgcv/R/smooth.r: 288 -# warning("m wrong length and ignored.") -# Recommended/mgcv/R/smooth.r: 390 -# warning("m wrong length and ignored.") -msgid "m wrong length and ignored." -msgstr "'m' posiada niepoprawną długość przez co został zignorowany." - -# Recommended/mgcv/R/smooth.r: 294 -# stop("Repeated variables as arguments of a smooth are not permitted") -# Recommended/mgcv/R/smooth.r: 396 -# stop("Repeated variables as arguments of a smooth are not permitted") -# Recommended/mgcv/R/smooth.r: 469 -# stop("Repeated variables as arguments of a smooth are not permitted") -msgid "Repeated variables as arguments of a smooth are not permitted" -msgstr "Powtórzone zmienne jako argumenty wygładzenia nie są dozwolone" - -# Recommended/mgcv/R/smooth.r: 317 -# warning("only first element of `id' used") -# Recommended/mgcv/R/smooth.r: 427 -# warning("only first element of `id' used") -# Recommended/mgcv/R/smooth.r: 477 -# warning("only first element of `id' used") -msgid "only first element of `id' used" -msgstr "został użyty jedynie pierwszy element 'id'" - -msgid "ord is wrong. reset to NULL." 
-msgstr "" - -msgid "ord contains out of range orders (which will be ignored)" -msgstr "" - -# Recommended/mgcv/R/smooth.r: 454 -# stop("by=. not allowed") -msgid "by=. not allowed" -msgstr "'by=.' nie jest dozwolone" - -# Recommended/mgcv/R/smooth.r: 456 -# stop("s(.) not yet supported.") -# Recommended/mgcv/R/smooth.r: 460 -# stop("s(.) not yet supported.") -msgid "s(.) not yet supported." -msgstr "'s(.)' nie jest jeszcze wspierane." - -# Recommended/mgcv/R/smooth.r: 466 -# warning("argument k of s() should be integer and has been rounded") -msgid "argument k of s() should be integer and has been rounded" -msgstr "" -"argument 'k' w 's()' powinie być liczbą calkowitą więc został zaokrąglony" - -msgid "attempt to use unsuitable marginal smooth class" -msgstr "" - -# Recommended/mgcv/R/smooth.r: 556 -# stop("Sorry, tensor products of smooths with multiple penalties are not supported.") -# Recommended/mgcv/R/smooth.r: 798 -# stop("Sorry, tensor products of smooths with multiple penalties are not supported.") -msgid "" -"Sorry, tensor products of smooths with multiple penalties are not supported." -msgstr "" -"Przykro mi, produkty tensorowe wygładzeń z wielokrotnymi karami nie są " -"wpierane." - -# Recommended/mgcv/R/smooth.r: 585 -# warning("reparameterization unstable for margin: not done") -msgid "reparameterization unstable for margin: not done" -msgstr "ponowna parametryzacja nie jest stabilna dla marginesu: nie wykonano" - -msgid "" -"single penalty tensor product smooths are deprecated and likely to be " -"removed soon" -msgstr "" - -msgid "fx length wrong from t2 term: ignored" -msgstr "" - -msgid "length of sp incorrect in t2: ignored" -msgstr "" - -# Recommended/mgcv/R/smooth.r: 969 -# stop("d can not be negative in call to null.space.dimension().") -msgid "d can not be negative in call to null.space.dimension()." -msgstr "" -"'d' nie może być ujemne w wywołaniu przekazywanym do 'null.space." -"dimension()'." - -# Recommended/mgcv/R/smooth.r: 1007 -# stop("arguments of smooth not same dimension") -# Recommended/mgcv/R/smooth.r: 1139 -# stop("arguments of smooth not same dimension") -# Recommended/mgcv/R/smooth.r: 2251 -# stop("arguments of smooth not same dimension") -# Recommended/mgcv/R/smooth.r: 2448 -# stop("arguments of smooth not same dimension") -# Recommended/mgcv/R/smooth.r: 2601 -# stop("arguments of smooth not same dimension") -msgid "arguments of smooth not same dimension" -msgstr "argumenty wygładzania nie mają tego samego wymiaru" - -# Recommended/mgcv/R/smooth.r: 1019 -# stop("components of knots relating to a single smooth must be of same length") -# Recommended/mgcv/R/smooth.r: 2264 -# stop("components of knots relating to a single smooth must be of same length") -# Recommended/mgcv/R/smooth.r: 2461 -# stop("components of knots relating to a single smooth must be of same length") -msgid "components of knots relating to a single smooth must be of same length" -msgstr "" -"komponenty węzłów odwołujące się do pojedynczego wygładzenia muszą być tej " -"samej długości" - -# Recommended/mgcv/R/smooth.r: 1024 -# warning("more knots than data in a tp term: knots ignored.") -msgid "more knots than data in a tp term: knots ignored." -msgstr "więcej węzłów niż danych w członie 'tp': węzły zostały zignorowane." 
- -# Recommended/mgcv/R/smooth.r: 1061 -# warning("basis dimension, k, increased to minimum possible\n -# Recommended/mgcv/R/smooth.r: 1199 -# warning("basis dimension, k, increased to minimum possible\n -# Recommended/mgcv/R/smooth.r: 1348 -# warning("basis dimension, k, increased to minimum possible\n -msgid "basis dimension, k, increased to minimum possible" -msgstr "wymiar podstawy, k, zwiększył się do minimalnego możliwego" - -# Recommended/mgcv/R/smooth.r: 1140 -# stop("no data to predict at") -# Recommended/mgcv/R/smooth.r: 1270 -# stop("no data to predict at") -# Recommended/mgcv/R/smooth.r: 1402 -# stop("no data to predict at") -msgid "no data to predict at" -msgstr "brak danych na których można oprzeć przewidywanie" - -# Recommended/mgcv/R/smooth.r: 1187 -# stop("Basis only handles 1D smooths") -# Recommended/mgcv/R/smooth.r: 1344 -# stop("Basis only handles 1D smooths") -# Recommended/mgcv/R/smooth.r: 1425 -# stop("Basis only handles 1D smooths") -# Recommended/mgcv/R/smooth.r: 1490 -# stop("Basis only handles 1D smooths") -msgid "Basis only handles 1D smooths" -msgstr "Podstawa obsługuje jedynie jednowymiarowe wygładzania" - -# Recommended/mgcv/R/smooth.r: 1214 -# stop("number of supplied knots != k for a cr smooth") -msgid "number of supplied knots != k for a cr smooth" -msgstr "liczba dostarczonych węzłów != k dla wygładzania 'cr'" - -msgid "F is missing from cr smooth - refit model with current mgcv" -msgstr "" - -# Recommended/mgcv/R/smooth.r: 1305 -# stop("more knots than unique data values is not allowed") -msgid "more knots than unique data values is not allowed" -msgstr "" -"większa liczba węzłów niż unikalnych wartości danych nie jest dozwolona" - -# Recommended/mgcv/R/smooth.r: 1358 -# stop("number of supplied knots != k for a cc smooth") -msgid "number of supplied knots != k for a cc smooth" -msgstr "liczba dostarczonych węzłów != k dla wygładzania 'cc'" - -# Recommended/mgcv/R/smooth.r: 1424 -# stop("basis dimension too small for b-spline order") -# Recommended/mgcv/R/smooth.r: 1489 -# stop("basis dimension too small for b-spline order") -msgid "basis dimension too small for b-spline order" -msgstr "wymiar podstawy jest zbyt mały dla rzędu b-splajnu" - -# Recommended/mgcv/R/smooth.r: 1432 -# stop("knot range does not include data") -# Recommended/mgcv/R/smooth.r: 1496 -# stop("knot range does not include data") -msgid "knot range does not include data" -msgstr "zakres węzła nie zawiera danych" - -# Recommended/mgcv/man/smooth.construct.Rd: 229 -# stop(paste("there should be ",nk," supplied knots")) -# Recommended/mgcv/R/smooth.r: 1438 -# stop(paste("there should be ",nk," supplied knots")) -# Recommended/mgcv/R/smooth.r: 1441 -# stop(paste("there should be",nk,"knots supplied")) -# Recommended/mgcv/R/smooth.r: 1505 -# stop(paste("there should be ",nk+2*m[1]+2," supplied knots")) -msgid "there should be" -msgstr "powinno być" - -# Recommended/mgcv/man/smooth.construct.Rd: 229 -# stop(paste("there should be ",nk," supplied knots")) -# Recommended/mgcv/R/smooth.r: 1438 -# stop(paste("there should be ",nk," supplied knots")) -# Recommended/mgcv/R/smooth.r: 1505 -# stop(paste("there should be ",nk+2*m[1]+2," supplied knots")) -msgid "supplied knots" -msgstr "dostarczonych węzłów" - -# Recommended/mgcv/R/smooth.r: 1441 -# stop(paste("there should be",nk,"knots supplied")) -msgid "knots supplied" -msgstr "dostarczonych węzłów" - -# Recommended/mgcv/R/smooth.r: 1446 -# warning("knot range is so wide that there is *no* information about some basis coefficients") -# 
Recommended/mgcv/R/smooth.r: 1509 -# warning("knot range is so wide that there is *no* information about some basis coefficients") -msgid "" -"knot range is so wide that there is *no* information about some basis " -"coefficients" -msgstr "" -"zakres węzła jest tak szeroki, że *brak* informacji o niektórych " -"podstawowych współczynnikach" - -# Recommended/mgcv/R/smooth.r: 1453 -# stop("penalty order too high for basis dimension") -msgid "penalty order too high for basis dimension" -msgstr "rząd kar jest zbyt duży dla podstawy wymiaru" - -msgid "basis dimension is larger than number of unique covariates" -msgstr "" - -msgid "fs smooths can only have one factor argument" -msgstr "" - -msgid "\"fs\" smooth cannot use a multiply penalized basis (wrong basis in xt)" -msgstr "" - -msgid "\"fs\" terms can not be fixed here" -msgstr "" - -# Recommended/mgcv/R/smooth.r: 1761 -# stop("the adaptive smooth class is limited to 1 or 2 covariates.") -msgid "the adaptive smooth class is limited to 1 or 2 covariates." -msgstr "adaptacyjna klasa wygładzania jest ograniczona do 1 lub 2 zmiennych." - -# Recommended/mgcv/R/smooth.r: 1777 -# stop("penalty basis too large for smoothing basis") -# Recommended/mgcv/R/smooth.r: 1828 -# stop("penalty basis too large for smoothing basis") -msgid "penalty basis too large for smoothing basis" -msgstr "podstawa kar jest zbyt duża dla podstawy wygładzania" - -# Recommended/mgcv/R/smooth.r: 1848 -# stop("penalty basis too small") -msgid "penalty basis too small" -msgstr "podstawa kar jest zbyt mała" - -msgid "random effects don't work with ids." -msgstr "" - -msgid "MRF basis dimension set too high" -msgstr "" - -msgid "data contain regions that are not contained in the knot specification" -msgstr "" - -msgid "" -"penalty matrix, boundary polygons and/or neighbours list must be supplied in " -"xt" -msgstr "" - -msgid "no spatial information provided!" -msgstr "" - -msgid "mismatch between nb/polys supplied area names and data area names" -msgstr "" - -# Recommended/mgcv/R/smooth.r: 251 -# warning("something wrong with argument d.") -# Recommended/mgcv/R/smooth.r: 359 -# warning("something wrong with argument d.") -#, fuzzy -msgid "Something wrong with auto- penalty construction" -msgstr "coś nie tak z argumentem 'd'." - -# Recommended/mgcv/man/smooth.construct.Rd: 229 -# stop(paste("there should be ",nk," supplied knots")) -# Recommended/mgcv/R/smooth.r: 1438 -# stop(paste("there should be ",nk," supplied knots")) -# Recommended/mgcv/R/smooth.r: 1505 -# stop(paste("there should be ",nk+2*m[1]+2," supplied knots")) -#, fuzzy -msgid "supplied penalty not square!" -msgstr "dostarczonych węzłów" - -# Recommended/mgcv/R/mgcv.r: 509 -# stop(" a parametric penalty has wrong dimension") -#, fuzzy -msgid "supplied penalty wrong dimension!" -msgstr "parametryczna kara ma niepoprawny wymiar" - -msgid "penalty column names don't match supplied area names!" -msgstr "" - -msgid "Can only deal with a sphere" -msgstr "" - -# Recommended/mgcv/R/smooth.r: 1024 -# warning("more knots than data in a tp term: knots ignored.") -#, fuzzy -msgid "more knots than data in an sos term: knots ignored." -msgstr "więcej węzłów niż danych w członie 'tp': węzły zostały zignorowane." - -# Recommended/mgcv/R/smooth.r: 1024 -# warning("more knots than data in a tp term: knots ignored.") -#, fuzzy -msgid "more knots than data in a ds term: knots ignored." -msgstr "więcej węzłów niż danych w członie 'tp': węzły zostały zignorowane." 
- -msgid "" -"A term has fewer unique covariate combinations than specified maximum " -"degrees of freedom" -msgstr "" - -msgid "s value reduced" -msgstr "" - -msgid "s value increased" -msgstr "" - -msgid "No suitable s (i.e. m[2]) try increasing m[1]" -msgstr "" - -msgid "s value modified to give continuous function" -msgstr "" - -# Recommended/mgcv/R/smooth.r: 1061 -# warning("basis dimension, k, increased to minimum possible\n -# Recommended/mgcv/R/smooth.r: 1199 -# warning("basis dimension, k, increased to minimum possible\n -# Recommended/mgcv/R/smooth.r: 1348 -# warning("basis dimension, k, increased to minimum possible\n -#, fuzzy -msgid "basis dimension reset to minimum possible" -msgstr "wymiar podstawy, k, zwiększył się do minimalnego możliwego" - -# Recommended/mgcv/R/smooth.r: 2753 -# warning("smooth objects should not have a qrc attribute.") -msgid "smooth objects should not have a qrc attribute." -msgstr "gładkie obiekty nie powinny mieć atrybutu 'qrc'." - -# Recommended/mgcv/R/smooth.r: 2791 -# stop("unimplemented sparse constraint type requested") -msgid "unimplemented sparse constraint type requested" -msgstr "zażądano niezaimplementowanego typu rzadkiego więzu" - -# Recommended/mgcv/R/smooth.r: 2842 -# warning("handling `by' variables in smooth constructors may not work with the summation convention ") -msgid "" -"handling `by' variables in smooth constructors may not work with the " -"summation convention" -msgstr "" -"obsługiwanie zmiennych 'by' w konstruktorach wygładzenia może nie działać z " -"konwencją sumacyjną" - -# Recommended/mgcv/R/smooth.r: 2859 -# stop("Can't find by variable") -# Recommended/mgcv/R/smooth.r: 3124 -# stop("Can't find by variable") -# Recommended/mgcv/R/smooth.r: 3149 -# stop("Can't find by variable") -# Recommended/mgcv/R/smooth.r: 3244 -# stop("Can't find by variable") -msgid "Can't find by variable" -msgstr "Nie można znaleźć poprzez zmienną" - -# Recommended/mgcv/R/smooth.r: 2862 -# stop("factor `by' variables can not be used with matrix arguments.") -msgid "factor `by' variables can not be used with matrix arguments." -msgstr "faktoryzacja zmiennych 'by' nie może być użyta z argumentami macierzy." - -# Recommended/mgcv/R/smooth.r: 2882 -# stop("`by' variable must be same dimension as smooth arguments") -# Recommended/mgcv/R/smooth.r: 3155 -# stop("`by' variable must be same dimension as smooth arguments") -# Recommended/mgcv/R/smooth.r: 3249 -# stop("`by' variable must be same dimension as smooth arguments") -msgid "`by' variable must be same dimension as smooth arguments" -msgstr "zmienna `by' musi mieć ten sam wymiar co argumenty wygładzania" - -msgid "Number of prediction and fit constraints must match" -msgstr "" - -msgid "x and y must be same length" -msgstr "" - -msgid "variable names don't match boundary names" -msgstr "" - -msgid "x and y not same length" -msgstr "" - -# Recommended/mgcv/R/mgcv.r: 3534 -# stop("L must be a matrix.") -# Recommended/mgcv/R/gam.fit3.r: 937 -# stop("L must be a matrix.") -# Recommended/mgcv/R/gam.fit3.r: 979 -# stop("L must be a matrix.") -# Recommended/mgcv/R/gam.fit3.r: 1256 -# stop("L must be a matrix.") -# Recommended/mgcv/R/gam.fit3.r: 1499 -# stop("L must be a matrix.") -#, fuzzy -msgid "bnd must be a list." -msgstr "'L' musi być macierzą." - -msgid "lengths of k and bnd are not compatible." 
-msgstr "" - -msgid "attempt to select non existent basis function" -msgstr "" - -msgid "coefficient vector wrong length" -msgstr "" - -# Recommended/mgcv/man/negbin.Rd: 27 -# stop("'theta' must be specified") -# Recommended/mgcv/R/gam.fit3.r: 2040 -# stop("'theta' must be specified") -#, fuzzy -msgid "knots must be specified for soap" -msgstr "'theta' musi być określone" - -msgid "soap films are bivariate only" -msgstr "" - -msgid "need at least one interior knot" -msgstr "" - -msgid "can't soap smooth without a boundary" -msgstr "" - -msgid "bnd must be a list of boundary loops" -msgstr "" - -msgid "faulty bnd" -msgstr "" - -msgid "k and bnd lengths are inconsistent" -msgstr "" - -msgid "data outside soap boundary" -msgstr "" - -msgid "no free coefs in sf smooth" -msgstr "" - -msgid "only deals with 2D case" -msgstr "" - -msgid "not enough unique values to find k nearest" -msgstr "" - -msgid "cubic spline only deals with 1D data" -msgstr "" - -msgid "object not fully initialized" -msgstr "" - -# Recommended/mgcv/R/gamm.r: 1343 -# stop("gamm() requires package nlme to be installed") -#~ msgid "gamm() requires package nlme to be installed" -#~ msgstr "'gamm()' wymaga, aby pakiet 'nlme' został zainstalowany" - -# Recommended/mgcv/R/mgcv.r: 116 -# stop(paste("M$S[",i,"] is too large given M$off[",i,"]",sep="")) -#~ msgid "M$S[" -#~ msgstr "M$S[" - -# Recommended/mgcv/R/mgcv.r: 116 -# stop(paste("M$S[",i,"] is too large given M$off[",i,"]",sep="")) -#~ msgid "]" -#~ msgstr "]" - -#~ msgid "Can't mix fixed and estimated penalties in mgcv() - use magic()" -#~ msgstr "" -#~ "Nie można mieszać stałych oraz oszacowanych kar w 'mgcv()' - użyj magic()" - -# Recommended/mgcv/R/mgcv.r: 3282 -# warning("extra arguments discarded") -#~ msgid "extra arguments discarded" -#~ msgstr "dodatkowe argumenty zostały odrzucone" - -# Recommended/mgcv/R/plots.r: 1272 -# stop(paste("View variables must contain more than one value. view = c(",view[1],",",view[2],").",sep="")) -#~ msgid ")." -#~ msgstr ")." - -# Recommended/mgcv/R/mgcv.r: 3473 -# stop(paste("S[[",i,"]] matrix is not +ve definite.",sep="")) -#~ msgid "S[[" -#~ msgstr "Macierz S[[" - -# Recommended/mgcv/R/smooth.r: 1388 -# stop("can't predict outside range of knots with periodic smoother") -#~ msgid "can't predict outside range of knots with periodic smoother" -#~ msgstr "" -#~ "nie można przewidywać poza zakresem węzłów z periodycznym wygładzaniem" diff --git a/po/de.po b/po/de.po index c317e71..eaada69 100644 --- a/po/de.po +++ b/po/de.po @@ -83,14 +83,14 @@ msgstr "QPCLS - Rang-Defizit im Modell" #: tprs.c:40 msgid "You must have 2m>d for a thin plate spline." -msgstr "Es muss 2m>d für eine dünnwandige Spline gelten" +msgstr "Es muss 2m>d für einen dünnwandige Spline gelten." 
#: tprs.c:375 tprs.c:383 msgid "" "A term has fewer unique covariate combinations than specified maximum " "degrees of freedom" msgstr "" -"Ein Term hat weniger einheitliche Kombinationen von Kovarianten als maximal " +"Ein Term hat weniger einzigartige Kombinationen von Kovariaten als maximal " "angegebene Freiheitsgrade" diff --git a/po/po.po b/po/po.po deleted file mode 100644 index e867d4d..0000000 --- a/po/po.po +++ /dev/null @@ -1,140 +0,0 @@ -msgid "" -msgstr "" -"Project-Id-Version: mgcv 1.7-19\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2015-03-02 20:44+0000\n" -"PO-Revision-Date: 2012-08-31 17:12+0100\n" -"Last-Translator: Łukasz Daniel \n" -"Language-Team: Łukasz Daniel \n" -"Language: pl_PL\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 " -"|| n%100>=20) ? 1 : 2)\n" -"X-Poedit-SourceCharset: iso-8859-1\n" - -# Recommended/mgcv/src/magic.c: 406 -# error(_("magic requires smoothing parameter starting values if L supplied")) -#: magic.c:444 -msgid "magic requires smoothing parameter starting values if L supplied" -msgstr "" -"'magic' wymaga wartości startowych dla parametru wygładzającego jeśli L " -"zostało dostarczone" - -# Recommended/mgcv/src/magic.c: 521 -# error(_("magic, the gcv/ubre optimizer, failed to converge after 400 iterations.")) -#: magic.c:562 -msgid "magic, the gcv/ubre optimizer, failed to converge after 400 iterations." -msgstr "" -"'magic', omptymalizator gcv/ubre, nie zdodał uzbieżnić się po 400 iteracjach." - -# Recommended/mgcv/src/matrix.c: 86 -# (_("Failed to initialize memory for matrix."),1) -#: matrix.c:80 -msgid "Failed to initialize memory for matrix." -msgstr "Nie udało się zainicjalizować pamięci dla macierzy." - -# Recommended/mgcv/src/matrix.c: 148 -# (_("An out of bound write to matrix has occurred!"),1) -# Recommended/mgcv/src/matrix.c: 211 -# (_("An out of bound write to matrix has occurred!"),1) -#: matrix.c:142 matrix.c:204 -msgid "An out of bound write to matrix has occurred!" -msgstr "Nastąpił zapis poza zakresem macierzy!" - -# Recommended/mgcv/src/matrix.c: 154 -# (_("INTEGRITY PROBLEM in the extant matrix list."),1) -#: matrix.c:148 -msgid "INTEGRITY PROBLEM in the extant matrix list." -msgstr "PROBLEM SPÓJNOŚCI w istniejącej liście macierzy." - -# Recommended/mgcv/src/matrix.c: 187 -# (_("You are trying to check matrix integrity without defining RANGECHECK.")) -#: matrix.c:180 -msgid "You are trying to check matrix integrity without defining RANGECHECK." -msgstr "Próbujesz sprawdzić integralność macierzy bez określania 'RANGECHECK'" - -# Recommended/mgcv/src/matrix.c: 249 -# (_("Target matrix too small in mcopy"),1) -#: matrix.c:242 -msgid "Target matrix too small in mcopy" -msgstr "Docelowa macierz jest zbyt mała, aby wykonać 'mcopy'" - -# Recommended/mgcv/src/matrix.c: 269 -# (_("Incompatible matrices in matmult."),1) -# Recommended/mgcv/src/matrix.c: 277 -# (_("Incompatible matrices in matmult."),1) -# Recommended/mgcv/src/matrix.c: 290 -# (_("Incompatible matrices in matmult."),1) -# Recommended/mgcv/src/matrix.c: 298 -# (_("Incompatible matrices in matmult."),1) -#: matrix.c:262 matrix.c:270 matrix.c:283 matrix.c:291 -msgid "Incompatible matrices in matmult." -msgstr "Niespójne macierze w 'matmult'." 
- -# Recommended/mgcv/src/matrix.c: 385 -# (_("Attempt to invert() non-square matrix"),1) -#: matrix.c:378 -msgid "Attempt to invert() non-square matrix" -msgstr "Próba odwrócenia metodą 'invert()' niekwadratowej macierzy" - -# Recommended/mgcv/src/matrix.c: 407 -# (_("Singular Matrix passed to invert()"),1) -#: matrix.c:400 -msgid "Singular Matrix passed to invert()" -msgstr "Przekazano osobliwą macierz do 'invert()'" - -# Recommended/mgcv/src/matrix.c: 1328 -# (_("svd() not converged"),1) -#: matrix.c:1320 -msgid "svd() not converged" -msgstr "'svd()' nie uzbieżnił się" - -#: matrix.c:1396 -#, c-format -msgid "svdroot matrix not +ve semi def. %g" -msgstr "macierz 'svdroot' nie jest dodatnio określona %g" - -# Recommended/mgcv/src/matrix.c: 1432 -# (_("Sort failed"),1) -#: matrix.c:1424 -msgid "Sort failed" -msgstr "Sortowanie nie powiodło się" - -# Recommended/mgcv/src/qp.c: 59 -# (_("ERROR in addconQT."),1) -#: qp.c:58 -msgid "ERROR in addconQT." -msgstr "BŁĄD w addconQT." - -# Recommended/mgcv/src/qp.c: 465 -# (_("QPCLS - Rank deficiency in model"),1) -#: qp.c:464 -msgid "QPCLS - Rank deficiency in model" -msgstr "QPCLS - Niedobór rang w modelu" - -# Recommended/mgcv/src/tprs.c: 44 -# (_("You must have 2m>d for a thin plate spline."),1) -# Recommended/mgcv/src/tprs.c: 79 -# (_("You must have 2m>d for a thin plate spline."),1) -#: tprs.c:40 -msgid "You must have 2m>d for a thin plate spline." -msgstr "Musisz mieć 2m>d dla cienkiej płyty splajnu." - -# Recommended/mgcv/src/tprs.c: 415 -# (_("A term has fewer unique covariate combinations than specified maximum degrees of freedom"),1) -# Recommended/mgcv/src/tprs.c: 423 -# (_("A term has fewer unique covariate combinations than specified maximum degrees of freedom"),1) -# Recommended/mgcv/R/smooth.r: 2471 -# stop( -# "A term has fewer unique covariate combinations than specified maximum degrees of freedom") -#: tprs.c:375 tprs.c:383 -msgid "" -"A term has fewer unique covariate combinations than specified maximum " -"degrees of freedom" -msgstr "" -"Człon posiada mniej unikalnych kombinacji zmiennych niezależnych niż " -"określona maksymalna liczba stopni swobody" - - diff --git a/src/Makevars b/src/Makevars old mode 100755 new mode 100644 diff --git a/src/discrete.c b/src/discrete.c new file mode 100644 index 0000000..c4b7286 --- /dev/null +++ b/src/discrete.c @@ -0,0 +1,441 @@ +/* (c) Simon N. Wood (2015) Released under GPL2 */ + +/* Routines to work with discretized covariate models + Data structures: + * X contains sub matrices making up each term + * nx is number of sub-matrices. + * jth matrix is m[j] by p[j] + * k contains the index vectors for each matrix in Xb. + * ts[i] is starting matrix of ith term + * dt[i] is number of matrices making up ith term. + * n_terms is the number of terms. + * Q contains reparameterization matrices for each term + * qs[i] is starting address within Q for ith repara matrix + R CMD SHLIB discrete2.c NOTE: *Must* put CFLAGS = -fopenmp -O2 in .R/Makevars + + Here is an interesting bit of code for converting an index kk + running over the upper triangle of an nt by nt matrix to rows + and columns (first row corresponds to kk = 0, 1, 2 ,...) 
+ i=kk;r=0;while (i >= *nt-r) { i -= *nt - r; r++;} c = r + i; +*/ +#include +#include +#include + +#include +#include +#include +#include +#include +#ifdef SUPPORT_OPENMP +#include +#endif + +#include "mgcv.h" + +/* basic extraction operations */ + +void singleXj(double *Xj,double *X, int *m, int *k, int *n,int *j) { +/* Extract a column j of matrix stored in compact form in X, k into Xj. + X has m rows. k is of length n. ith row of result is Xj = X[k(i),j] + (an n vector). This function is O(n). +*/ + double *pe; + X += *m * *j; /* shift to start of jth column */ + for (pe = Xj + *n;Xj < pe;Xj++,k++) *Xj = X[*k]; +} /* singleXj */ + +void tensorXj(double *Xj, double *X, int *m, int *p,int *dt, + int *k, int *n, int *j) { +/* Extract a column j of tensor product term matrix stored in compact + form in X, k into Xj. There are dt sub matrices in Xj. The ith is + m[i] by p[i]. There are dt index n - vectors stacked end on end in + k. This function is O(n*dt) + + This routine performs pure extraction only if Xj is a vector of 1s on + entry. Otherwise the jth column is multiplied element wise by the + contents of Xj on entry. +*/ + int q=1,l,i,jp; + double *p0,*p1,*M; + p1 = Xj + *n; /* pointer for end of Xj */ + for (i = 0;i < *dt;i++) q *= p[i]; + jp = *j; + for (i = 0;i < *dt; i++) { + q /= p[i]; /* update q */ + l = jp/q; /* column of current marginal */ + jp = jp%q; + M = X + m[i] * l; /* M now points to start of col l of ith marginal model matrix */ + for (p0=Xj;p00 && j==dt[i]-1) { + c1 = pt[i]*m[q]; + if (c1>dC) dC = c1; /* dimension of working matrix C */ + } + if (j==0) pt[i] = p[q]; else pt[i] *= p[q]; /* term dimension */ + //if (maxm0) voff[i+1] = voff[i] + pt[i]; else voff[i+1] = voff[i]; /* start of ith v matrix */ + if (maxp < pt[i]) maxp = pt[i]; + if (qc[i]<=0) tps[i+1] = tps[i] + pt[i]; /* where ith terms starts in param vector */ + else tps[i+1] = tps[i] + pt[i] - 1; /* there is a tensor constraint to apply - reducing param count*/ + } + /* now form the product term by term... */ + pf=f0 = (double *)calloc((size_t)*n,sizeof(double)); + i = *n; if (i0) { + for (p0=f,p1=f + *n,p2=f0;p0=0) { /* model has AR component, requiring sqrt(weights) */ + for (p0 = w,p1 = w + *n;p00) voff[i+1] = voff[i] + pt[i]; else voff[i+1] = voff[i]; /* start of ith Q matrix */ + if (maxp < pt[i]) maxp=pt[i]; + if (qc[i]<=0) tps[i+1] = tps[i] + pt[i]; /* where ith terms starts in param vector */ + else tps[i+1] = tps[i] + pt[i] - 1; /* there is a tensor constraint to apply - reducing param count*/ + } + Xy0 = (double *) R_chk_calloc((size_t)maxp,sizeof(double)); + work = (double *) R_chk_calloc((size_t)*n,sizeof(double)); + work1 = (double *) R_chk_calloc((size_t)maxm,sizeof(double)); + /* apply W to y - for AR models this needs to be expanded */ + Wy = (double *) R_chk_calloc((size_t)*n,sizeof(double)); /* Wy */ + for (p0=Wy,p1=Wy + *n,p2=w;p0=0) { /* AR components present (weights are sqrt, therefore) */ + rwMatrix(ar_stop,ar_row,ar_weights,Wy,n,&one,&zero); + rwMatrix(ar_stop,ar_row,ar_weights,Wy,n,&one,&one); /* transpose of transform applied */ + for (p0=w,p1=w + *n,p2=Wy;p01) { /* it's a tensor */ + tensorXty(Xy0,work,work1,Wy,X+off[ts[i]],m+ts[i],p+ts[i],dt+i,k+ts[i] * *n,n); + if (qc[i]>0) { /* there is a constraint to apply Z'Xy0: form Q'Xy0 and discard first row... 
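The compact storage scheme set out in the discrete.c header comment — each term held as one or more small marginal matrices plus index vectors k, so that row i of a notional full model-matrix column j is X[k[i], j] — is the basis of the singleXj/tensorXj extraction routines above. A minimal stand-alone sketch of the single-matrix case follows; demo_singleXj, main and the 3 by 2 example data are invented here purely for illustration and are not part of mgcv.

/* Sketch of the compact-storage idea used by discrete.c: a marginal
   matrix X (m rows, column-major) plus an index vector k (length n)
   stand in for an n-row model matrix whose i-th row is row k[i] of X.
   Extracting "full" column j is then O(n): Xj[i] = X[k[i] + m*j].
   All names and data here are made up for illustration. */
#include <stdio.h>

static void demo_singleXj(double *Xj, const double *X, int m,
                          const int *k, int n, int j) {
  const double *col = X + (size_t)m * j;     /* start of column j of X */
  for (int i = 0; i < n; i++) Xj[i] = col[k[i]];
}

int main(void) {
  /* X is 3 x 2, stored column-major as in R */
  double X[6] = {1.0, 2.0, 3.0,      /* column 0 */
                 10.0, 20.0, 30.0};  /* column 1 */
  int k[5] = {0, 2, 2, 1, 0};        /* n = 5 rows of the notional full matrix */
  double Xj[5];
  demo_singleXj(Xj, X, 3, k, 5, 1);
  for (int i = 0; i < 5; i++) printf("%g ", Xj[i]); /* prints 10 30 30 20 10 */
  printf("\n");
  return 0;
}

The payoff is that m (the number of distinct covariate rows) can be far smaller than n, so an O(n) index lookup plus work on the small m-row blocks replaces storage of, and multiplication by, an n-row model matrix.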
*/ + /* Q' = I - vv' */ + for (x=0.0,p0=Xy0,p1=p0 + pt[i],p2=v+voff[i];p0=0) { /* model has AR component, requiring sqrt(weights) */ + for (p0 = w,p1 = w + *n;p00) voff[i+1] = voff[i] + pt[i]; else voff[i+1] = voff[i]; /* start of ith v vector */ + if (maxppd[c]) { /* Form Xr'WXc */ + a=r;b=c; + } else { /* Form Xc'WXr */ + a=c;b=r; + } + /* split cols between threads... */ + dk = pt[b] / *nthreads; rk = pt[b] % *nthreads; + if (dk * *nthreads < pt[b]) dk++;start[0]=0; + for (i=0;i<*nthreads;i++) { + start[i+1] = start[i] + dk; + if (start[i+1]>pt[b]) start[i+1]=pt[b]; + } + #ifdef SUPPORT_OPENMP + #pragma omp parallel private(Xi,i,temp,tempn,p0,p1,p2) num_threads(*nthreads) + #endif + { /* begin parallel section */ + #ifdef SUPPORT_OPENMP + #pragma omp for + #endif + for (kk=0;kk<*nthreads;kk++) { + /* allocate thread specific storage... */ + temp = tempB + kk * maxm; + Xi = XiB + kk * *n; + tempn = tempnB + kk * *n; + for (i=start[kk];i1) { /* tensor */ + for (p0=Xi,p1=p0+*n;p0=0) { /* AR components present (weights are sqrt, therefore) */ + rwMatrix(ar_stop,ar_row,ar_weights,Xi,n,&one,&zero); + rwMatrix(ar_stop,ar_row,ar_weights,Xi,n,&one,&one); /* transpose of transform applied */ + for (p0=w,p1=w + *n,p2=Xi;p01) { /* tensor */ + tensorXty(xwx + i * pt[a],tempn,temp,Xi,X+off[ts[a]],m+ts[a],p+ts[a], + dt+a,k+ *n * ts[a], n); + } else { /* singleton */ + singleXty(xwx + i * pt[a],temp,Xi,X+off[ts[a]],m+ts[a],p+ts[a],k + *n * ts[a],n); + } + } /* loop over columns of Xb */ + } + /* so now xwx contains pt[a] by pt[b] matrix Xa'WXb */ + } /* end parallel section */ + + /* if Xb is tensor, may need to apply constraint */ + if (dt[a]>1&&qc[a]>0) { /* first term is a tensor with a constraint */ + ///* copy xwx to xwx0 */ + //for (p0=xwx,p1=p0 + pt[b]*pt[a],p2=xwx0;p01&&qc[b]>0) { /* second term is a tensor with a constraint */ + /* copy xwx to xwx0 */ + for (p0=xwx,p1=p0 + pt[b]*pa,p2=xwx0;p0pd[c]) { /* xwx = Xr'WXc */ + for (i=0;i1) for (p1 = P2,p2 = P2 + *M * *M;p1= M-r) { i -= M - r; r++;}; c = r + i; /* convert kk to row/col */ + if (r==M-1) bn = nf; else bn = *nb; /* (row) B block size */ + bs = r * *nb; /* (row) B block start */ + if (c==r) { /* diagonal block */ + for (k=0;k0) { /* R is transposed */ + for (i=0;i<*p;i++) { + for (p0=Vi,k=0;k<*M;k++) { + /* Vi is i by M */ + p1 = dR + k * *p * *p + i * *p; /* start of col i of kth dR */ + p2 = p1 + i + 1; /* first zero in col i of kth dR */ + for (;p1i) dR[k] = (dA[k] - x - R[k]*dR[i + i * *p])/R[i + i * *p]; + else dR[k] = (dA[k] - x)*.5/R[i + i * *p]; + } +} /* dchol */ void mgcv_chol(double *a,int *pivot,int *n,int *rank) /* a stored in column order, this routine finds the pivoted choleski decomposition of matrix a @@ -1427,7 +1584,7 @@ void mgcv_pbsi(double *R,int *r,int *nt) { } #ifdef SUPPORT_OPENMP - #pragma omp parallel private(b,i,k,zz,rr) num_threads(*nt) +#pragma omp parallel private(b,i,k,zz,rr,r2) num_threads(*nt) #endif { /* open parallel section */ #ifdef SUPPORT_OPENMP @@ -1486,7 +1643,7 @@ void mgcv_PPt(double *A,double *R,int *r,int *nt) { } #ifdef SUPPORT_OPENMP - #pragma omp parallel private(b,i,ru,rl) num_threads(*nt) + #pragma omp parallel private(b,i,ru,rl,r1) num_threads(*nt) #endif { /* open parallel section */ #ifdef SUPPORT_OPENMP @@ -1539,7 +1696,7 @@ void mgcv_PPt(double *A,double *R,int *r,int *nt) { if (a[i]<=a[i-1]) a[i] = a[i-1]+1; } #ifdef SUPPORT_OPENMP - #pragma omp parallel private(b,i,rl) num_threads(*nt) + #pragma omp parallel private(b,i,rl,r1) num_threads(*nt) #endif { /* start parallel block */ 
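The index arithmetic quoted in the discrete.c header comment and reused inside the parallel cross-product loop above (i = kk; r = 0; while (i >= nt - r) { i -= nt - r; r++; } c = r + i;) turns a single counter kk running over the upper triangle of an nt by nt block matrix into a (row, column) pair, so one flat loop can be split across threads without visiting any symmetric block twice. A small self-contained check of that arithmetic (everything below is illustrative, not mgcv code):

/* Enumerate kk = 0,1,2,... over the upper triangle (diagonal included)
   of an nt x nt block matrix and recover (r,c) with the same
   arithmetic as the discrete.c comment.  Illustration only. */
#include <stdio.h>

int main(void) {
  int nt = 4;
  int nblocks = nt * (nt + 1) / 2;            /* number of upper-triangle blocks */
  for (int kk = 0; kk < nblocks; kk++) {
    int i = kk, r = 0;
    while (i >= nt - r) { i -= nt - r; r++; } /* peel off complete rows */
    int c = r + i;                            /* column within row r */
    printf("kk=%2d -> (r=%d, c=%d)\n", kk, r, c);
  }
  return 0;
}

For nt = 4 this prints the 10 blocks (0,0) through (3,3) in exactly the order the flat counter visits them, which is the order the parallel section above hands them out to threads.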
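The tensor-product constraint branches above are commented "Q' = I - vv'" and "form Q'Xy0 and discard first row": a rank-one Householder-type reflection is applied and the leading element dropped, so a constrained term carries pt[i] - 1 coefficients. The sketch below shows just that operation, under the assumption that v is stored scaled so that I - vv' is orthogonal (i.e. v'v = 2); apply_dropped_reflection and the toy numbers are hypothetical and not mgcv's implementation.

/* Apply z <- (I - v v') z and drop the first element, as in the
   "Q' = I - vv'" constraint comments of discrete.c.  Sketch only:
   the function name and data below are invented. */
#include <stdio.h>

/* writes the last p-1 elements of (I - v v') z into out */
static void apply_dropped_reflection(const double *v, const double *z,
                                     double *out, int p) {
  double x = 0.0;
  for (int i = 0; i < p; i++) x += v[i] * z[i];             /* x = v'z */
  for (int i = 1; i < p; i++) out[i - 1] = z[i] - v[i] * x; /* drop row 0 */
}

int main(void) {
  double v[2] = {1.0, 1.0};  /* v'v = 2, so I - vv' is orthogonal */
  double z[2] = {3.0, 5.0};
  double out[1];
  apply_dropped_reflection(v, z, out, 2);
  printf("%g\n", out[0]);    /* 5 - 1*(3+5) = -3 */
  return 0;
}

Mapping back the other way, the full p coefficients would be recovered by applying (I - vv') to the constrained coefficient vector padded with a leading zero, consistent with the constraint null-space basis Z being the trailing p - 1 columns of the orthogonal Q.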
#ifdef SUPPORT_OPENMP @@ -1601,7 +1758,55 @@ void mgcv_forwardsolve(double *R,int *r,int *c,double *B,double *C, int *bc) char side='L',uplo='U',transa='T',diag='N'; for (pC=C,pR=pC+ *bc * *c;pC