diff --git a/.Rprofile b/.Rprofile index 81b960f..8cdb656 100644 --- a/.Rprofile +++ b/.Rprofile @@ -1 +1,3 @@ -source("renv/activate.R") +if (Sys.getenv("R_BUILD_CHECK") != "TRUE") { + source("renv/activate.R") +} diff --git a/_extensions/quarto-ext/fontawesome/_extension.yml b/_extensions/quarto-ext/fontawesome/_extension.yml new file mode 100644 index 0000000..c0787a8 --- /dev/null +++ b/_extensions/quarto-ext/fontawesome/_extension.yml @@ -0,0 +1,7 @@ +title: Font Awesome support +author: Carlos Scheidegger +version: 1.1.0 +quarto-required: ">=1.2.269" +contributes: + shortcodes: + - fontawesome.lua diff --git a/_extensions/quarto-ext/fontawesome/assets/css/all.css b/_extensions/quarto-ext/fontawesome/assets/css/all.css new file mode 100644 index 0000000..3e24980 --- /dev/null +++ b/_extensions/quarto-ext/fontawesome/assets/css/all.css @@ -0,0 +1,7971 @@ +/*! + * Font Awesome Free 6.4.2 by @fontawesome - https://fontawesome.com + * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) + * Copyright 2023 Fonticons, Inc. 
+ */ +.fa { + font-family: var(--fa-style-family, "Font Awesome 6 Free"); + font-weight: var(--fa-style, 900); } + +.fa, +.fa-classic, +.fa-sharp, +.fas, +.fa-solid, +.far, +.fa-regular, +.fab, +.fa-brands, +.fal, +.fa-light, +.fat, +.fa-thin, +.fad, +.fa-duotone { + -moz-osx-font-smoothing: grayscale; + -webkit-font-smoothing: antialiased; + display: var(--fa-display, inline-block); + font-style: normal; + font-variant: normal; + line-height: 1; + text-rendering: auto; } + +.fas, +.fa-classic, +.fa-solid, +.far, +.fa-regular { + font-family: 'Font Awesome 6 Free'; } + +.fab, +.fa-brands { + font-family: 'Font Awesome 6 Brands'; } + +.fa-1x { + font-size: 1em; } + +.fa-2x { + font-size: 2em; } + +.fa-3x { + font-size: 3em; } + +.fa-4x { + font-size: 4em; } + +.fa-5x { + font-size: 5em; } + +.fa-6x { + font-size: 6em; } + +.fa-7x { + font-size: 7em; } + +.fa-8x { + font-size: 8em; } + +.fa-9x { + font-size: 9em; } + +.fa-10x { + font-size: 10em; } + +.fa-2xs { + font-size: 0.625em; + line-height: 0.1em; + vertical-align: 0.225em; } + +.fa-xs { + font-size: 0.75em; + line-height: 0.08333em; + vertical-align: 0.125em; } + +.fa-sm { + font-size: 0.875em; + line-height: 0.07143em; + vertical-align: 0.05357em; } + +.fa-lg { + font-size: 1.25em; + line-height: 0.05em; + vertical-align: -0.075em; } + +.fa-xl { + font-size: 1.5em; + line-height: 0.04167em; + vertical-align: -0.125em; } + +.fa-2xl { + font-size: 2em; + line-height: 0.03125em; + vertical-align: -0.1875em; } + +.fa-fw { + text-align: center; + width: 1.25em; } + +.fa-ul { + list-style-type: none; + margin-left: var(--fa-li-margin, 2.5em); + padding-left: 0; } + .fa-ul > li { + position: relative; } + +.fa-li { + left: calc(var(--fa-li-width, 2em) * -1); + position: absolute; + text-align: center; + width: var(--fa-li-width, 2em); + line-height: inherit; } + +.fa-border { + border-color: var(--fa-border-color, #eee); + border-radius: var(--fa-border-radius, 0.1em); + border-style: var(--fa-border-style, solid); 
+ border-width: var(--fa-border-width, 0.08em); + padding: var(--fa-border-padding, 0.2em 0.25em 0.15em); } + +.fa-pull-left { + float: left; + margin-right: var(--fa-pull-margin, 0.3em); } + +.fa-pull-right { + float: right; + margin-left: var(--fa-pull-margin, 0.3em); } + +.fa-beat { + -webkit-animation-name: fa-beat; + animation-name: fa-beat; + -webkit-animation-delay: var(--fa-animation-delay, 0s); + animation-delay: var(--fa-animation-delay, 0s); + -webkit-animation-direction: var(--fa-animation-direction, normal); + animation-direction: var(--fa-animation-direction, normal); + -webkit-animation-duration: var(--fa-animation-duration, 1s); + animation-duration: var(--fa-animation-duration, 1s); + -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + -webkit-animation-timing-function: var(--fa-animation-timing, ease-in-out); + animation-timing-function: var(--fa-animation-timing, ease-in-out); } + +.fa-bounce { + -webkit-animation-name: fa-bounce; + animation-name: fa-bounce; + -webkit-animation-delay: var(--fa-animation-delay, 0s); + animation-delay: var(--fa-animation-delay, 0s); + -webkit-animation-direction: var(--fa-animation-direction, normal); + animation-direction: var(--fa-animation-direction, normal); + -webkit-animation-duration: var(--fa-animation-duration, 1s); + animation-duration: var(--fa-animation-duration, 1s); + -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + -webkit-animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.28, 0.84, 0.42, 1)); + animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.28, 0.84, 0.42, 1)); } + +.fa-fade { + -webkit-animation-name: fa-fade; + animation-name: fa-fade; + -webkit-animation-delay: var(--fa-animation-delay, 0s); + animation-delay: var(--fa-animation-delay, 0s); 
+ -webkit-animation-direction: var(--fa-animation-direction, normal); + animation-direction: var(--fa-animation-direction, normal); + -webkit-animation-duration: var(--fa-animation-duration, 1s); + animation-duration: var(--fa-animation-duration, 1s); + -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + -webkit-animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1)); + animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1)); } + +.fa-beat-fade { + -webkit-animation-name: fa-beat-fade; + animation-name: fa-beat-fade; + -webkit-animation-delay: var(--fa-animation-delay, 0s); + animation-delay: var(--fa-animation-delay, 0s); + -webkit-animation-direction: var(--fa-animation-direction, normal); + animation-direction: var(--fa-animation-direction, normal); + -webkit-animation-duration: var(--fa-animation-duration, 1s); + animation-duration: var(--fa-animation-duration, 1s); + -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + -webkit-animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1)); + animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1)); } + +.fa-flip { + -webkit-animation-name: fa-flip; + animation-name: fa-flip; + -webkit-animation-delay: var(--fa-animation-delay, 0s); + animation-delay: var(--fa-animation-delay, 0s); + -webkit-animation-direction: var(--fa-animation-direction, normal); + animation-direction: var(--fa-animation-direction, normal); + -webkit-animation-duration: var(--fa-animation-duration, 1s); + animation-duration: var(--fa-animation-duration, 1s); + -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + 
-webkit-animation-timing-function: var(--fa-animation-timing, ease-in-out); + animation-timing-function: var(--fa-animation-timing, ease-in-out); } + +.fa-shake { + -webkit-animation-name: fa-shake; + animation-name: fa-shake; + -webkit-animation-delay: var(--fa-animation-delay, 0s); + animation-delay: var(--fa-animation-delay, 0s); + -webkit-animation-direction: var(--fa-animation-direction, normal); + animation-direction: var(--fa-animation-direction, normal); + -webkit-animation-duration: var(--fa-animation-duration, 1s); + animation-duration: var(--fa-animation-duration, 1s); + -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + -webkit-animation-timing-function: var(--fa-animation-timing, linear); + animation-timing-function: var(--fa-animation-timing, linear); } + +.fa-spin { + -webkit-animation-name: fa-spin; + animation-name: fa-spin; + -webkit-animation-delay: var(--fa-animation-delay, 0s); + animation-delay: var(--fa-animation-delay, 0s); + -webkit-animation-direction: var(--fa-animation-direction, normal); + animation-direction: var(--fa-animation-direction, normal); + -webkit-animation-duration: var(--fa-animation-duration, 2s); + animation-duration: var(--fa-animation-duration, 2s); + -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + -webkit-animation-timing-function: var(--fa-animation-timing, linear); + animation-timing-function: var(--fa-animation-timing, linear); } + +.fa-spin-reverse { + --fa-animation-direction: reverse; } + +.fa-pulse, +.fa-spin-pulse { + -webkit-animation-name: fa-spin; + animation-name: fa-spin; + -webkit-animation-direction: var(--fa-animation-direction, normal); + animation-direction: var(--fa-animation-direction, normal); + -webkit-animation-duration: var(--fa-animation-duration, 1s); + animation-duration: 
var(--fa-animation-duration, 1s); + -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + -webkit-animation-timing-function: var(--fa-animation-timing, steps(8)); + animation-timing-function: var(--fa-animation-timing, steps(8)); } + +@media (prefers-reduced-motion: reduce) { + .fa-beat, + .fa-bounce, + .fa-fade, + .fa-beat-fade, + .fa-flip, + .fa-pulse, + .fa-shake, + .fa-spin, + .fa-spin-pulse { + -webkit-animation-delay: -1ms; + animation-delay: -1ms; + -webkit-animation-duration: 1ms; + animation-duration: 1ms; + -webkit-animation-iteration-count: 1; + animation-iteration-count: 1; + -webkit-transition-delay: 0s; + transition-delay: 0s; + -webkit-transition-duration: 0s; + transition-duration: 0s; } } + +@-webkit-keyframes fa-beat { + 0%, 90% { + -webkit-transform: scale(1); + transform: scale(1); } + 45% { + -webkit-transform: scale(var(--fa-beat-scale, 1.25)); + transform: scale(var(--fa-beat-scale, 1.25)); } } + +@keyframes fa-beat { + 0%, 90% { + -webkit-transform: scale(1); + transform: scale(1); } + 45% { + -webkit-transform: scale(var(--fa-beat-scale, 1.25)); + transform: scale(var(--fa-beat-scale, 1.25)); } } + +@-webkit-keyframes fa-bounce { + 0% { + -webkit-transform: scale(1, 1) translateY(0); + transform: scale(1, 1) translateY(0); } + 10% { + -webkit-transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0); + transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0); } + 30% { + -webkit-transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em)); + transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em)); } + 50% { + -webkit-transform: scale(var(--fa-bounce-land-scale-x, 1.05), 
var(--fa-bounce-land-scale-y, 0.95)) translateY(0); + transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0); } + 57% { + -webkit-transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em)); + transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em)); } + 64% { + -webkit-transform: scale(1, 1) translateY(0); + transform: scale(1, 1) translateY(0); } + 100% { + -webkit-transform: scale(1, 1) translateY(0); + transform: scale(1, 1) translateY(0); } } + +@keyframes fa-bounce { + 0% { + -webkit-transform: scale(1, 1) translateY(0); + transform: scale(1, 1) translateY(0); } + 10% { + -webkit-transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0); + transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0); } + 30% { + -webkit-transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em)); + transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em)); } + 50% { + -webkit-transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0); + transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0); } + 57% { + -webkit-transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em)); + transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em)); } + 64% { + -webkit-transform: scale(1, 1) translateY(0); + transform: scale(1, 1) translateY(0); } + 100% { + -webkit-transform: scale(1, 1) translateY(0); + transform: scale(1, 1) translateY(0); } } + +@-webkit-keyframes fa-fade { + 50% { + opacity: var(--fa-fade-opacity, 0.4); } } + +@keyframes fa-fade { + 50% { + opacity: var(--fa-fade-opacity, 0.4); } } + +@-webkit-keyframes fa-beat-fade { + 0%, 100% { + opacity: 
var(--fa-beat-fade-opacity, 0.4); + -webkit-transform: scale(1); + transform: scale(1); } + 50% { + opacity: 1; + -webkit-transform: scale(var(--fa-beat-fade-scale, 1.125)); + transform: scale(var(--fa-beat-fade-scale, 1.125)); } } + +@keyframes fa-beat-fade { + 0%, 100% { + opacity: var(--fa-beat-fade-opacity, 0.4); + -webkit-transform: scale(1); + transform: scale(1); } + 50% { + opacity: 1; + -webkit-transform: scale(var(--fa-beat-fade-scale, 1.125)); + transform: scale(var(--fa-beat-fade-scale, 1.125)); } } + +@-webkit-keyframes fa-flip { + 50% { + -webkit-transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg)); + transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg)); } } + +@keyframes fa-flip { + 50% { + -webkit-transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg)); + transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg)); } } + +@-webkit-keyframes fa-shake { + 0% { + -webkit-transform: rotate(-15deg); + transform: rotate(-15deg); } + 4% { + -webkit-transform: rotate(15deg); + transform: rotate(15deg); } + 8%, 24% { + -webkit-transform: rotate(-18deg); + transform: rotate(-18deg); } + 12%, 28% { + -webkit-transform: rotate(18deg); + transform: rotate(18deg); } + 16% { + -webkit-transform: rotate(-22deg); + transform: rotate(-22deg); } + 20% { + -webkit-transform: rotate(22deg); + transform: rotate(22deg); } + 32% { + -webkit-transform: rotate(-12deg); + transform: rotate(-12deg); } + 36% { + -webkit-transform: rotate(12deg); + transform: rotate(12deg); } + 40%, 100% { + -webkit-transform: rotate(0deg); + transform: rotate(0deg); } } + +@keyframes fa-shake { + 0% { + -webkit-transform: rotate(-15deg); + transform: rotate(-15deg); } + 4% { + -webkit-transform: rotate(15deg); + transform: rotate(15deg); } + 8%, 24% { + 
-webkit-transform: rotate(-18deg); + transform: rotate(-18deg); } + 12%, 28% { + -webkit-transform: rotate(18deg); + transform: rotate(18deg); } + 16% { + -webkit-transform: rotate(-22deg); + transform: rotate(-22deg); } + 20% { + -webkit-transform: rotate(22deg); + transform: rotate(22deg); } + 32% { + -webkit-transform: rotate(-12deg); + transform: rotate(-12deg); } + 36% { + -webkit-transform: rotate(12deg); + transform: rotate(12deg); } + 40%, 100% { + -webkit-transform: rotate(0deg); + transform: rotate(0deg); } } + +@-webkit-keyframes fa-spin { + 0% { + -webkit-transform: rotate(0deg); + transform: rotate(0deg); } + 100% { + -webkit-transform: rotate(360deg); + transform: rotate(360deg); } } + +@keyframes fa-spin { + 0% { + -webkit-transform: rotate(0deg); + transform: rotate(0deg); } + 100% { + -webkit-transform: rotate(360deg); + transform: rotate(360deg); } } + +.fa-rotate-90 { + -webkit-transform: rotate(90deg); + transform: rotate(90deg); } + +.fa-rotate-180 { + -webkit-transform: rotate(180deg); + transform: rotate(180deg); } + +.fa-rotate-270 { + -webkit-transform: rotate(270deg); + transform: rotate(270deg); } + +.fa-flip-horizontal { + -webkit-transform: scale(-1, 1); + transform: scale(-1, 1); } + +.fa-flip-vertical { + -webkit-transform: scale(1, -1); + transform: scale(1, -1); } + +.fa-flip-both, +.fa-flip-horizontal.fa-flip-vertical { + -webkit-transform: scale(-1, -1); + transform: scale(-1, -1); } + +.fa-rotate-by { + -webkit-transform: rotate(var(--fa-rotate-angle, none)); + transform: rotate(var(--fa-rotate-angle, none)); } + +.fa-stack { + display: inline-block; + height: 2em; + line-height: 2em; + position: relative; + vertical-align: middle; + width: 2.5em; } + +.fa-stack-1x, +.fa-stack-2x { + left: 0; + position: absolute; + text-align: center; + width: 100%; + z-index: var(--fa-stack-z-index, auto); } + +.fa-stack-1x { + line-height: inherit; } + +.fa-stack-2x { + font-size: 2em; } + +.fa-inverse { + color: var(--fa-inverse, #fff); } + 
+/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen +readers do not read off random characters that represent icons */ + +.fa-0::before { + content: "\30"; } + +.fa-1::before { + content: "\31"; } + +.fa-2::before { + content: "\32"; } + +.fa-3::before { + content: "\33"; } + +.fa-4::before { + content: "\34"; } + +.fa-5::before { + content: "\35"; } + +.fa-6::before { + content: "\36"; } + +.fa-7::before { + content: "\37"; } + +.fa-8::before { + content: "\38"; } + +.fa-9::before { + content: "\39"; } + +.fa-fill-drip::before { + content: "\f576"; } + +.fa-arrows-to-circle::before { + content: "\e4bd"; } + +.fa-circle-chevron-right::before { + content: "\f138"; } + +.fa-chevron-circle-right::before { + content: "\f138"; } + +.fa-at::before { + content: "\40"; } + +.fa-trash-can::before { + content: "\f2ed"; } + +.fa-trash-alt::before { + content: "\f2ed"; } + +.fa-text-height::before { + content: "\f034"; } + +.fa-user-xmark::before { + content: "\f235"; } + +.fa-user-times::before { + content: "\f235"; } + +.fa-stethoscope::before { + content: "\f0f1"; } + +.fa-message::before { + content: "\f27a"; } + +.fa-comment-alt::before { + content: "\f27a"; } + +.fa-info::before { + content: "\f129"; } + +.fa-down-left-and-up-right-to-center::before { + content: "\f422"; } + +.fa-compress-alt::before { + content: "\f422"; } + +.fa-explosion::before { + content: "\e4e9"; } + +.fa-file-lines::before { + content: "\f15c"; } + +.fa-file-alt::before { + content: "\f15c"; } + +.fa-file-text::before { + content: "\f15c"; } + +.fa-wave-square::before { + content: "\f83e"; } + +.fa-ring::before { + content: "\f70b"; } + +.fa-building-un::before { + content: "\e4d9"; } + +.fa-dice-three::before { + content: "\f527"; } + +.fa-calendar-days::before { + content: "\f073"; } + +.fa-calendar-alt::before { + content: "\f073"; } + +.fa-anchor-circle-check::before { + content: "\e4aa"; } + +.fa-building-circle-arrow-right::before { + content: "\e4d1"; } + 
+.fa-volleyball::before { + content: "\f45f"; } + +.fa-volleyball-ball::before { + content: "\f45f"; } + +.fa-arrows-up-to-line::before { + content: "\e4c2"; } + +.fa-sort-down::before { + content: "\f0dd"; } + +.fa-sort-desc::before { + content: "\f0dd"; } + +.fa-circle-minus::before { + content: "\f056"; } + +.fa-minus-circle::before { + content: "\f056"; } + +.fa-door-open::before { + content: "\f52b"; } + +.fa-right-from-bracket::before { + content: "\f2f5"; } + +.fa-sign-out-alt::before { + content: "\f2f5"; } + +.fa-atom::before { + content: "\f5d2"; } + +.fa-soap::before { + content: "\e06e"; } + +.fa-icons::before { + content: "\f86d"; } + +.fa-heart-music-camera-bolt::before { + content: "\f86d"; } + +.fa-microphone-lines-slash::before { + content: "\f539"; } + +.fa-microphone-alt-slash::before { + content: "\f539"; } + +.fa-bridge-circle-check::before { + content: "\e4c9"; } + +.fa-pump-medical::before { + content: "\e06a"; } + +.fa-fingerprint::before { + content: "\f577"; } + +.fa-hand-point-right::before { + content: "\f0a4"; } + +.fa-magnifying-glass-location::before { + content: "\f689"; } + +.fa-search-location::before { + content: "\f689"; } + +.fa-forward-step::before { + content: "\f051"; } + +.fa-step-forward::before { + content: "\f051"; } + +.fa-face-smile-beam::before { + content: "\f5b8"; } + +.fa-smile-beam::before { + content: "\f5b8"; } + +.fa-flag-checkered::before { + content: "\f11e"; } + +.fa-football::before { + content: "\f44e"; } + +.fa-football-ball::before { + content: "\f44e"; } + +.fa-school-circle-exclamation::before { + content: "\e56c"; } + +.fa-crop::before { + content: "\f125"; } + +.fa-angles-down::before { + content: "\f103"; } + +.fa-angle-double-down::before { + content: "\f103"; } + +.fa-users-rectangle::before { + content: "\e594"; } + +.fa-people-roof::before { + content: "\e537"; } + +.fa-people-line::before { + content: "\e534"; } + +.fa-beer-mug-empty::before { + content: "\f0fc"; } + +.fa-beer::before { + 
content: "\f0fc"; } + +.fa-diagram-predecessor::before { + content: "\e477"; } + +.fa-arrow-up-long::before { + content: "\f176"; } + +.fa-long-arrow-up::before { + content: "\f176"; } + +.fa-fire-flame-simple::before { + content: "\f46a"; } + +.fa-burn::before { + content: "\f46a"; } + +.fa-person::before { + content: "\f183"; } + +.fa-male::before { + content: "\f183"; } + +.fa-laptop::before { + content: "\f109"; } + +.fa-file-csv::before { + content: "\f6dd"; } + +.fa-menorah::before { + content: "\f676"; } + +.fa-truck-plane::before { + content: "\e58f"; } + +.fa-record-vinyl::before { + content: "\f8d9"; } + +.fa-face-grin-stars::before { + content: "\f587"; } + +.fa-grin-stars::before { + content: "\f587"; } + +.fa-bong::before { + content: "\f55c"; } + +.fa-spaghetti-monster-flying::before { + content: "\f67b"; } + +.fa-pastafarianism::before { + content: "\f67b"; } + +.fa-arrow-down-up-across-line::before { + content: "\e4af"; } + +.fa-spoon::before { + content: "\f2e5"; } + +.fa-utensil-spoon::before { + content: "\f2e5"; } + +.fa-jar-wheat::before { + content: "\e517"; } + +.fa-envelopes-bulk::before { + content: "\f674"; } + +.fa-mail-bulk::before { + content: "\f674"; } + +.fa-file-circle-exclamation::before { + content: "\e4eb"; } + +.fa-circle-h::before { + content: "\f47e"; } + +.fa-hospital-symbol::before { + content: "\f47e"; } + +.fa-pager::before { + content: "\f815"; } + +.fa-address-book::before { + content: "\f2b9"; } + +.fa-contact-book::before { + content: "\f2b9"; } + +.fa-strikethrough::before { + content: "\f0cc"; } + +.fa-k::before { + content: "\4b"; } + +.fa-landmark-flag::before { + content: "\e51c"; } + +.fa-pencil::before { + content: "\f303"; } + +.fa-pencil-alt::before { + content: "\f303"; } + +.fa-backward::before { + content: "\f04a"; } + +.fa-caret-right::before { + content: "\f0da"; } + +.fa-comments::before { + content: "\f086"; } + +.fa-paste::before { + content: "\f0ea"; } + +.fa-file-clipboard::before { + content: 
"\f0ea"; } + +.fa-code-pull-request::before { + content: "\e13c"; } + +.fa-clipboard-list::before { + content: "\f46d"; } + +.fa-truck-ramp-box::before { + content: "\f4de"; } + +.fa-truck-loading::before { + content: "\f4de"; } + +.fa-user-check::before { + content: "\f4fc"; } + +.fa-vial-virus::before { + content: "\e597"; } + +.fa-sheet-plastic::before { + content: "\e571"; } + +.fa-blog::before { + content: "\f781"; } + +.fa-user-ninja::before { + content: "\f504"; } + +.fa-person-arrow-up-from-line::before { + content: "\e539"; } + +.fa-scroll-torah::before { + content: "\f6a0"; } + +.fa-torah::before { + content: "\f6a0"; } + +.fa-broom-ball::before { + content: "\f458"; } + +.fa-quidditch::before { + content: "\f458"; } + +.fa-quidditch-broom-ball::before { + content: "\f458"; } + +.fa-toggle-off::before { + content: "\f204"; } + +.fa-box-archive::before { + content: "\f187"; } + +.fa-archive::before { + content: "\f187"; } + +.fa-person-drowning::before { + content: "\e545"; } + +.fa-arrow-down-9-1::before { + content: "\f886"; } + +.fa-sort-numeric-desc::before { + content: "\f886"; } + +.fa-sort-numeric-down-alt::before { + content: "\f886"; } + +.fa-face-grin-tongue-squint::before { + content: "\f58a"; } + +.fa-grin-tongue-squint::before { + content: "\f58a"; } + +.fa-spray-can::before { + content: "\f5bd"; } + +.fa-truck-monster::before { + content: "\f63b"; } + +.fa-w::before { + content: "\57"; } + +.fa-earth-africa::before { + content: "\f57c"; } + +.fa-globe-africa::before { + content: "\f57c"; } + +.fa-rainbow::before { + content: "\f75b"; } + +.fa-circle-notch::before { + content: "\f1ce"; } + +.fa-tablet-screen-button::before { + content: "\f3fa"; } + +.fa-tablet-alt::before { + content: "\f3fa"; } + +.fa-paw::before { + content: "\f1b0"; } + +.fa-cloud::before { + content: "\f0c2"; } + +.fa-trowel-bricks::before { + content: "\e58a"; } + +.fa-face-flushed::before { + content: "\f579"; } + +.fa-flushed::before { + content: "\f579"; } + 
+.fa-hospital-user::before { + content: "\f80d"; } + +.fa-tent-arrow-left-right::before { + content: "\e57f"; } + +.fa-gavel::before { + content: "\f0e3"; } + +.fa-legal::before { + content: "\f0e3"; } + +.fa-binoculars::before { + content: "\f1e5"; } + +.fa-microphone-slash::before { + content: "\f131"; } + +.fa-box-tissue::before { + content: "\e05b"; } + +.fa-motorcycle::before { + content: "\f21c"; } + +.fa-bell-concierge::before { + content: "\f562"; } + +.fa-concierge-bell::before { + content: "\f562"; } + +.fa-pen-ruler::before { + content: "\f5ae"; } + +.fa-pencil-ruler::before { + content: "\f5ae"; } + +.fa-people-arrows::before { + content: "\e068"; } + +.fa-people-arrows-left-right::before { + content: "\e068"; } + +.fa-mars-and-venus-burst::before { + content: "\e523"; } + +.fa-square-caret-right::before { + content: "\f152"; } + +.fa-caret-square-right::before { + content: "\f152"; } + +.fa-scissors::before { + content: "\f0c4"; } + +.fa-cut::before { + content: "\f0c4"; } + +.fa-sun-plant-wilt::before { + content: "\e57a"; } + +.fa-toilets-portable::before { + content: "\e584"; } + +.fa-hockey-puck::before { + content: "\f453"; } + +.fa-table::before { + content: "\f0ce"; } + +.fa-magnifying-glass-arrow-right::before { + content: "\e521"; } + +.fa-tachograph-digital::before { + content: "\f566"; } + +.fa-digital-tachograph::before { + content: "\f566"; } + +.fa-users-slash::before { + content: "\e073"; } + +.fa-clover::before { + content: "\e139"; } + +.fa-reply::before { + content: "\f3e5"; } + +.fa-mail-reply::before { + content: "\f3e5"; } + +.fa-star-and-crescent::before { + content: "\f699"; } + +.fa-house-fire::before { + content: "\e50c"; } + +.fa-square-minus::before { + content: "\f146"; } + +.fa-minus-square::before { + content: "\f146"; } + +.fa-helicopter::before { + content: "\f533"; } + +.fa-compass::before { + content: "\f14e"; } + +.fa-square-caret-down::before { + content: "\f150"; } + +.fa-caret-square-down::before { + content: 
"\f150"; } + +.fa-file-circle-question::before { + content: "\e4ef"; } + +.fa-laptop-code::before { + content: "\f5fc"; } + +.fa-swatchbook::before { + content: "\f5c3"; } + +.fa-prescription-bottle::before { + content: "\f485"; } + +.fa-bars::before { + content: "\f0c9"; } + +.fa-navicon::before { + content: "\f0c9"; } + +.fa-people-group::before { + content: "\e533"; } + +.fa-hourglass-end::before { + content: "\f253"; } + +.fa-hourglass-3::before { + content: "\f253"; } + +.fa-heart-crack::before { + content: "\f7a9"; } + +.fa-heart-broken::before { + content: "\f7a9"; } + +.fa-square-up-right::before { + content: "\f360"; } + +.fa-external-link-square-alt::before { + content: "\f360"; } + +.fa-face-kiss-beam::before { + content: "\f597"; } + +.fa-kiss-beam::before { + content: "\f597"; } + +.fa-film::before { + content: "\f008"; } + +.fa-ruler-horizontal::before { + content: "\f547"; } + +.fa-people-robbery::before { + content: "\e536"; } + +.fa-lightbulb::before { + content: "\f0eb"; } + +.fa-caret-left::before { + content: "\f0d9"; } + +.fa-circle-exclamation::before { + content: "\f06a"; } + +.fa-exclamation-circle::before { + content: "\f06a"; } + +.fa-school-circle-xmark::before { + content: "\e56d"; } + +.fa-arrow-right-from-bracket::before { + content: "\f08b"; } + +.fa-sign-out::before { + content: "\f08b"; } + +.fa-circle-chevron-down::before { + content: "\f13a"; } + +.fa-chevron-circle-down::before { + content: "\f13a"; } + +.fa-unlock-keyhole::before { + content: "\f13e"; } + +.fa-unlock-alt::before { + content: "\f13e"; } + +.fa-cloud-showers-heavy::before { + content: "\f740"; } + +.fa-headphones-simple::before { + content: "\f58f"; } + +.fa-headphones-alt::before { + content: "\f58f"; } + +.fa-sitemap::before { + content: "\f0e8"; } + +.fa-circle-dollar-to-slot::before { + content: "\f4b9"; } + +.fa-donate::before { + content: "\f4b9"; } + +.fa-memory::before { + content: "\f538"; } + +.fa-road-spikes::before { + content: "\e568"; } + 
+.fa-fire-burner::before { + content: "\e4f1"; } + +.fa-flag::before { + content: "\f024"; } + +.fa-hanukiah::before { + content: "\f6e6"; } + +.fa-feather::before { + content: "\f52d"; } + +.fa-volume-low::before { + content: "\f027"; } + +.fa-volume-down::before { + content: "\f027"; } + +.fa-comment-slash::before { + content: "\f4b3"; } + +.fa-cloud-sun-rain::before { + content: "\f743"; } + +.fa-compress::before { + content: "\f066"; } + +.fa-wheat-awn::before { + content: "\e2cd"; } + +.fa-wheat-alt::before { + content: "\e2cd"; } + +.fa-ankh::before { + content: "\f644"; } + +.fa-hands-holding-child::before { + content: "\e4fa"; } + +.fa-asterisk::before { + content: "\2a"; } + +.fa-square-check::before { + content: "\f14a"; } + +.fa-check-square::before { + content: "\f14a"; } + +.fa-peseta-sign::before { + content: "\e221"; } + +.fa-heading::before { + content: "\f1dc"; } + +.fa-header::before { + content: "\f1dc"; } + +.fa-ghost::before { + content: "\f6e2"; } + +.fa-list::before { + content: "\f03a"; } + +.fa-list-squares::before { + content: "\f03a"; } + +.fa-square-phone-flip::before { + content: "\f87b"; } + +.fa-phone-square-alt::before { + content: "\f87b"; } + +.fa-cart-plus::before { + content: "\f217"; } + +.fa-gamepad::before { + content: "\f11b"; } + +.fa-circle-dot::before { + content: "\f192"; } + +.fa-dot-circle::before { + content: "\f192"; } + +.fa-face-dizzy::before { + content: "\f567"; } + +.fa-dizzy::before { + content: "\f567"; } + +.fa-egg::before { + content: "\f7fb"; } + +.fa-house-medical-circle-xmark::before { + content: "\e513"; } + +.fa-campground::before { + content: "\f6bb"; } + +.fa-folder-plus::before { + content: "\f65e"; } + +.fa-futbol::before { + content: "\f1e3"; } + +.fa-futbol-ball::before { + content: "\f1e3"; } + +.fa-soccer-ball::before { + content: "\f1e3"; } + +.fa-paintbrush::before { + content: "\f1fc"; } + +.fa-paint-brush::before { + content: "\f1fc"; } + +.fa-lock::before { + content: "\f023"; } + 
+.fa-gas-pump::before { + content: "\f52f"; } + +.fa-hot-tub-person::before { + content: "\f593"; } + +.fa-hot-tub::before { + content: "\f593"; } + +.fa-map-location::before { + content: "\f59f"; } + +.fa-map-marked::before { + content: "\f59f"; } + +.fa-house-flood-water::before { + content: "\e50e"; } + +.fa-tree::before { + content: "\f1bb"; } + +.fa-bridge-lock::before { + content: "\e4cc"; } + +.fa-sack-dollar::before { + content: "\f81d"; } + +.fa-pen-to-square::before { + content: "\f044"; } + +.fa-edit::before { + content: "\f044"; } + +.fa-car-side::before { + content: "\f5e4"; } + +.fa-share-nodes::before { + content: "\f1e0"; } + +.fa-share-alt::before { + content: "\f1e0"; } + +.fa-heart-circle-minus::before { + content: "\e4ff"; } + +.fa-hourglass-half::before { + content: "\f252"; } + +.fa-hourglass-2::before { + content: "\f252"; } + +.fa-microscope::before { + content: "\f610"; } + +.fa-sink::before { + content: "\e06d"; } + +.fa-bag-shopping::before { + content: "\f290"; } + +.fa-shopping-bag::before { + content: "\f290"; } + +.fa-arrow-down-z-a::before { + content: "\f881"; } + +.fa-sort-alpha-desc::before { + content: "\f881"; } + +.fa-sort-alpha-down-alt::before { + content: "\f881"; } + +.fa-mitten::before { + content: "\f7b5"; } + +.fa-person-rays::before { + content: "\e54d"; } + +.fa-users::before { + content: "\f0c0"; } + +.fa-eye-slash::before { + content: "\f070"; } + +.fa-flask-vial::before { + content: "\e4f3"; } + +.fa-hand::before { + content: "\f256"; } + +.fa-hand-paper::before { + content: "\f256"; } + +.fa-om::before { + content: "\f679"; } + +.fa-worm::before { + content: "\e599"; } + +.fa-house-circle-xmark::before { + content: "\e50b"; } + +.fa-plug::before { + content: "\f1e6"; } + +.fa-chevron-up::before { + content: "\f077"; } + +.fa-hand-spock::before { + content: "\f259"; } + +.fa-stopwatch::before { + content: "\f2f2"; } + +.fa-face-kiss::before { + content: "\f596"; } + +.fa-kiss::before { + content: "\f596"; } + 
+.fa-bridge-circle-xmark::before { + content: "\e4cb"; } + +.fa-face-grin-tongue::before { + content: "\f589"; } + +.fa-grin-tongue::before { + content: "\f589"; } + +.fa-chess-bishop::before { + content: "\f43a"; } + +.fa-face-grin-wink::before { + content: "\f58c"; } + +.fa-grin-wink::before { + content: "\f58c"; } + +.fa-ear-deaf::before { + content: "\f2a4"; } + +.fa-deaf::before { + content: "\f2a4"; } + +.fa-deafness::before { + content: "\f2a4"; } + +.fa-hard-of-hearing::before { + content: "\f2a4"; } + +.fa-road-circle-check::before { + content: "\e564"; } + +.fa-dice-five::before { + content: "\f523"; } + +.fa-square-rss::before { + content: "\f143"; } + +.fa-rss-square::before { + content: "\f143"; } + +.fa-land-mine-on::before { + content: "\e51b"; } + +.fa-i-cursor::before { + content: "\f246"; } + +.fa-stamp::before { + content: "\f5bf"; } + +.fa-stairs::before { + content: "\e289"; } + +.fa-i::before { + content: "\49"; } + +.fa-hryvnia-sign::before { + content: "\f6f2"; } + +.fa-hryvnia::before { + content: "\f6f2"; } + +.fa-pills::before { + content: "\f484"; } + +.fa-face-grin-wide::before { + content: "\f581"; } + +.fa-grin-alt::before { + content: "\f581"; } + +.fa-tooth::before { + content: "\f5c9"; } + +.fa-v::before { + content: "\56"; } + +.fa-bangladeshi-taka-sign::before { + content: "\e2e6"; } + +.fa-bicycle::before { + content: "\f206"; } + +.fa-staff-snake::before { + content: "\e579"; } + +.fa-rod-asclepius::before { + content: "\e579"; } + +.fa-rod-snake::before { + content: "\e579"; } + +.fa-staff-aesculapius::before { + content: "\e579"; } + +.fa-head-side-cough-slash::before { + content: "\e062"; } + +.fa-truck-medical::before { + content: "\f0f9"; } + +.fa-ambulance::before { + content: "\f0f9"; } + +.fa-wheat-awn-circle-exclamation::before { + content: "\e598"; } + +.fa-snowman::before { + content: "\f7d0"; } + +.fa-mortar-pestle::before { + content: "\f5a7"; } + +.fa-road-barrier::before { + content: "\e562"; } + 
+.fa-school::before { + content: "\f549"; } + +.fa-igloo::before { + content: "\f7ae"; } + +.fa-joint::before { + content: "\f595"; } + +.fa-angle-right::before { + content: "\f105"; } + +.fa-horse::before { + content: "\f6f0"; } + +.fa-q::before { + content: "\51"; } + +.fa-g::before { + content: "\47"; } + +.fa-notes-medical::before { + content: "\f481"; } + +.fa-temperature-half::before { + content: "\f2c9"; } + +.fa-temperature-2::before { + content: "\f2c9"; } + +.fa-thermometer-2::before { + content: "\f2c9"; } + +.fa-thermometer-half::before { + content: "\f2c9"; } + +.fa-dong-sign::before { + content: "\e169"; } + +.fa-capsules::before { + content: "\f46b"; } + +.fa-poo-storm::before { + content: "\f75a"; } + +.fa-poo-bolt::before { + content: "\f75a"; } + +.fa-face-frown-open::before { + content: "\f57a"; } + +.fa-frown-open::before { + content: "\f57a"; } + +.fa-hand-point-up::before { + content: "\f0a6"; } + +.fa-money-bill::before { + content: "\f0d6"; } + +.fa-bookmark::before { + content: "\f02e"; } + +.fa-align-justify::before { + content: "\f039"; } + +.fa-umbrella-beach::before { + content: "\f5ca"; } + +.fa-helmet-un::before { + content: "\e503"; } + +.fa-bullseye::before { + content: "\f140"; } + +.fa-bacon::before { + content: "\f7e5"; } + +.fa-hand-point-down::before { + content: "\f0a7"; } + +.fa-arrow-up-from-bracket::before { + content: "\e09a"; } + +.fa-folder::before { + content: "\f07b"; } + +.fa-folder-blank::before { + content: "\f07b"; } + +.fa-file-waveform::before { + content: "\f478"; } + +.fa-file-medical-alt::before { + content: "\f478"; } + +.fa-radiation::before { + content: "\f7b9"; } + +.fa-chart-simple::before { + content: "\e473"; } + +.fa-mars-stroke::before { + content: "\f229"; } + +.fa-vial::before { + content: "\f492"; } + +.fa-gauge::before { + content: "\f624"; } + +.fa-dashboard::before { + content: "\f624"; } + +.fa-gauge-med::before { + content: "\f624"; } + +.fa-tachometer-alt-average::before { + content: "\f624"; 
} + +.fa-wand-magic-sparkles::before { + content: "\e2ca"; } + +.fa-magic-wand-sparkles::before { + content: "\e2ca"; } + +.fa-e::before { + content: "\45"; } + +.fa-pen-clip::before { + content: "\f305"; } + +.fa-pen-alt::before { + content: "\f305"; } + +.fa-bridge-circle-exclamation::before { + content: "\e4ca"; } + +.fa-user::before { + content: "\f007"; } + +.fa-school-circle-check::before { + content: "\e56b"; } + +.fa-dumpster::before { + content: "\f793"; } + +.fa-van-shuttle::before { + content: "\f5b6"; } + +.fa-shuttle-van::before { + content: "\f5b6"; } + +.fa-building-user::before { + content: "\e4da"; } + +.fa-square-caret-left::before { + content: "\f191"; } + +.fa-caret-square-left::before { + content: "\f191"; } + +.fa-highlighter::before { + content: "\f591"; } + +.fa-key::before { + content: "\f084"; } + +.fa-bullhorn::before { + content: "\f0a1"; } + +.fa-globe::before { + content: "\f0ac"; } + +.fa-synagogue::before { + content: "\f69b"; } + +.fa-person-half-dress::before { + content: "\e548"; } + +.fa-road-bridge::before { + content: "\e563"; } + +.fa-location-arrow::before { + content: "\f124"; } + +.fa-c::before { + content: "\43"; } + +.fa-tablet-button::before { + content: "\f10a"; } + +.fa-building-lock::before { + content: "\e4d6"; } + +.fa-pizza-slice::before { + content: "\f818"; } + +.fa-money-bill-wave::before { + content: "\f53a"; } + +.fa-chart-area::before { + content: "\f1fe"; } + +.fa-area-chart::before { + content: "\f1fe"; } + +.fa-house-flag::before { + content: "\e50d"; } + +.fa-person-circle-minus::before { + content: "\e540"; } + +.fa-ban::before { + content: "\f05e"; } + +.fa-cancel::before { + content: "\f05e"; } + +.fa-camera-rotate::before { + content: "\e0d8"; } + +.fa-spray-can-sparkles::before { + content: "\f5d0"; } + +.fa-air-freshener::before { + content: "\f5d0"; } + +.fa-star::before { + content: "\f005"; } + +.fa-repeat::before { + content: "\f363"; } + +.fa-cross::before { + content: "\f654"; } + 
+.fa-box::before { + content: "\f466"; } + +.fa-venus-mars::before { + content: "\f228"; } + +.fa-arrow-pointer::before { + content: "\f245"; } + +.fa-mouse-pointer::before { + content: "\f245"; } + +.fa-maximize::before { + content: "\f31e"; } + +.fa-expand-arrows-alt::before { + content: "\f31e"; } + +.fa-charging-station::before { + content: "\f5e7"; } + +.fa-shapes::before { + content: "\f61f"; } + +.fa-triangle-circle-square::before { + content: "\f61f"; } + +.fa-shuffle::before { + content: "\f074"; } + +.fa-random::before { + content: "\f074"; } + +.fa-person-running::before { + content: "\f70c"; } + +.fa-running::before { + content: "\f70c"; } + +.fa-mobile-retro::before { + content: "\e527"; } + +.fa-grip-lines-vertical::before { + content: "\f7a5"; } + +.fa-spider::before { + content: "\f717"; } + +.fa-hands-bound::before { + content: "\e4f9"; } + +.fa-file-invoice-dollar::before { + content: "\f571"; } + +.fa-plane-circle-exclamation::before { + content: "\e556"; } + +.fa-x-ray::before { + content: "\f497"; } + +.fa-spell-check::before { + content: "\f891"; } + +.fa-slash::before { + content: "\f715"; } + +.fa-computer-mouse::before { + content: "\f8cc"; } + +.fa-mouse::before { + content: "\f8cc"; } + +.fa-arrow-right-to-bracket::before { + content: "\f090"; } + +.fa-sign-in::before { + content: "\f090"; } + +.fa-shop-slash::before { + content: "\e070"; } + +.fa-store-alt-slash::before { + content: "\e070"; } + +.fa-server::before { + content: "\f233"; } + +.fa-virus-covid-slash::before { + content: "\e4a9"; } + +.fa-shop-lock::before { + content: "\e4a5"; } + +.fa-hourglass-start::before { + content: "\f251"; } + +.fa-hourglass-1::before { + content: "\f251"; } + +.fa-blender-phone::before { + content: "\f6b6"; } + +.fa-building-wheat::before { + content: "\e4db"; } + +.fa-person-breastfeeding::before { + content: "\e53a"; } + +.fa-right-to-bracket::before { + content: "\f2f6"; } + +.fa-sign-in-alt::before { + content: "\f2f6"; } + +.fa-venus::before { 
+ content: "\f221"; } + +.fa-passport::before { + content: "\f5ab"; } + +.fa-heart-pulse::before { + content: "\f21e"; } + +.fa-heartbeat::before { + content: "\f21e"; } + +.fa-people-carry-box::before { + content: "\f4ce"; } + +.fa-people-carry::before { + content: "\f4ce"; } + +.fa-temperature-high::before { + content: "\f769"; } + +.fa-microchip::before { + content: "\f2db"; } + +.fa-crown::before { + content: "\f521"; } + +.fa-weight-hanging::before { + content: "\f5cd"; } + +.fa-xmarks-lines::before { + content: "\e59a"; } + +.fa-file-prescription::before { + content: "\f572"; } + +.fa-weight-scale::before { + content: "\f496"; } + +.fa-weight::before { + content: "\f496"; } + +.fa-user-group::before { + content: "\f500"; } + +.fa-user-friends::before { + content: "\f500"; } + +.fa-arrow-up-a-z::before { + content: "\f15e"; } + +.fa-sort-alpha-up::before { + content: "\f15e"; } + +.fa-chess-knight::before { + content: "\f441"; } + +.fa-face-laugh-squint::before { + content: "\f59b"; } + +.fa-laugh-squint::before { + content: "\f59b"; } + +.fa-wheelchair::before { + content: "\f193"; } + +.fa-circle-arrow-up::before { + content: "\f0aa"; } + +.fa-arrow-circle-up::before { + content: "\f0aa"; } + +.fa-toggle-on::before { + content: "\f205"; } + +.fa-person-walking::before { + content: "\f554"; } + +.fa-walking::before { + content: "\f554"; } + +.fa-l::before { + content: "\4c"; } + +.fa-fire::before { + content: "\f06d"; } + +.fa-bed-pulse::before { + content: "\f487"; } + +.fa-procedures::before { + content: "\f487"; } + +.fa-shuttle-space::before { + content: "\f197"; } + +.fa-space-shuttle::before { + content: "\f197"; } + +.fa-face-laugh::before { + content: "\f599"; } + +.fa-laugh::before { + content: "\f599"; } + +.fa-folder-open::before { + content: "\f07c"; } + +.fa-heart-circle-plus::before { + content: "\e500"; } + +.fa-code-fork::before { + content: "\e13b"; } + +.fa-city::before { + content: "\f64f"; } + +.fa-microphone-lines::before { + content: 
"\f3c9"; } + +.fa-microphone-alt::before { + content: "\f3c9"; } + +.fa-pepper-hot::before { + content: "\f816"; } + +.fa-unlock::before { + content: "\f09c"; } + +.fa-colon-sign::before { + content: "\e140"; } + +.fa-headset::before { + content: "\f590"; } + +.fa-store-slash::before { + content: "\e071"; } + +.fa-road-circle-xmark::before { + content: "\e566"; } + +.fa-user-minus::before { + content: "\f503"; } + +.fa-mars-stroke-up::before { + content: "\f22a"; } + +.fa-mars-stroke-v::before { + content: "\f22a"; } + +.fa-champagne-glasses::before { + content: "\f79f"; } + +.fa-glass-cheers::before { + content: "\f79f"; } + +.fa-clipboard::before { + content: "\f328"; } + +.fa-house-circle-exclamation::before { + content: "\e50a"; } + +.fa-file-arrow-up::before { + content: "\f574"; } + +.fa-file-upload::before { + content: "\f574"; } + +.fa-wifi::before { + content: "\f1eb"; } + +.fa-wifi-3::before { + content: "\f1eb"; } + +.fa-wifi-strong::before { + content: "\f1eb"; } + +.fa-bath::before { + content: "\f2cd"; } + +.fa-bathtub::before { + content: "\f2cd"; } + +.fa-underline::before { + content: "\f0cd"; } + +.fa-user-pen::before { + content: "\f4ff"; } + +.fa-user-edit::before { + content: "\f4ff"; } + +.fa-signature::before { + content: "\f5b7"; } + +.fa-stroopwafel::before { + content: "\f551"; } + +.fa-bold::before { + content: "\f032"; } + +.fa-anchor-lock::before { + content: "\e4ad"; } + +.fa-building-ngo::before { + content: "\e4d7"; } + +.fa-manat-sign::before { + content: "\e1d5"; } + +.fa-not-equal::before { + content: "\f53e"; } + +.fa-border-top-left::before { + content: "\f853"; } + +.fa-border-style::before { + content: "\f853"; } + +.fa-map-location-dot::before { + content: "\f5a0"; } + +.fa-map-marked-alt::before { + content: "\f5a0"; } + +.fa-jedi::before { + content: "\f669"; } + +.fa-square-poll-vertical::before { + content: "\f681"; } + +.fa-poll::before { + content: "\f681"; } + +.fa-mug-hot::before { + content: "\f7b6"; } + 
+.fa-car-battery::before { + content: "\f5df"; } + +.fa-battery-car::before { + content: "\f5df"; } + +.fa-gift::before { + content: "\f06b"; } + +.fa-dice-two::before { + content: "\f528"; } + +.fa-chess-queen::before { + content: "\f445"; } + +.fa-glasses::before { + content: "\f530"; } + +.fa-chess-board::before { + content: "\f43c"; } + +.fa-building-circle-check::before { + content: "\e4d2"; } + +.fa-person-chalkboard::before { + content: "\e53d"; } + +.fa-mars-stroke-right::before { + content: "\f22b"; } + +.fa-mars-stroke-h::before { + content: "\f22b"; } + +.fa-hand-back-fist::before { + content: "\f255"; } + +.fa-hand-rock::before { + content: "\f255"; } + +.fa-square-caret-up::before { + content: "\f151"; } + +.fa-caret-square-up::before { + content: "\f151"; } + +.fa-cloud-showers-water::before { + content: "\e4e4"; } + +.fa-chart-bar::before { + content: "\f080"; } + +.fa-bar-chart::before { + content: "\f080"; } + +.fa-hands-bubbles::before { + content: "\e05e"; } + +.fa-hands-wash::before { + content: "\e05e"; } + +.fa-less-than-equal::before { + content: "\f537"; } + +.fa-train::before { + content: "\f238"; } + +.fa-eye-low-vision::before { + content: "\f2a8"; } + +.fa-low-vision::before { + content: "\f2a8"; } + +.fa-crow::before { + content: "\f520"; } + +.fa-sailboat::before { + content: "\e445"; } + +.fa-window-restore::before { + content: "\f2d2"; } + +.fa-square-plus::before { + content: "\f0fe"; } + +.fa-plus-square::before { + content: "\f0fe"; } + +.fa-torii-gate::before { + content: "\f6a1"; } + +.fa-frog::before { + content: "\f52e"; } + +.fa-bucket::before { + content: "\e4cf"; } + +.fa-image::before { + content: "\f03e"; } + +.fa-microphone::before { + content: "\f130"; } + +.fa-cow::before { + content: "\f6c8"; } + +.fa-caret-up::before { + content: "\f0d8"; } + +.fa-screwdriver::before { + content: "\f54a"; } + +.fa-folder-closed::before { + content: "\e185"; } + +.fa-house-tsunami::before { + content: "\e515"; } + 
+.fa-square-nfi::before { + content: "\e576"; } + +.fa-arrow-up-from-ground-water::before { + content: "\e4b5"; } + +.fa-martini-glass::before { + content: "\f57b"; } + +.fa-glass-martini-alt::before { + content: "\f57b"; } + +.fa-rotate-left::before { + content: "\f2ea"; } + +.fa-rotate-back::before { + content: "\f2ea"; } + +.fa-rotate-backward::before { + content: "\f2ea"; } + +.fa-undo-alt::before { + content: "\f2ea"; } + +.fa-table-columns::before { + content: "\f0db"; } + +.fa-columns::before { + content: "\f0db"; } + +.fa-lemon::before { + content: "\f094"; } + +.fa-head-side-mask::before { + content: "\e063"; } + +.fa-handshake::before { + content: "\f2b5"; } + +.fa-gem::before { + content: "\f3a5"; } + +.fa-dolly::before { + content: "\f472"; } + +.fa-dolly-box::before { + content: "\f472"; } + +.fa-smoking::before { + content: "\f48d"; } + +.fa-minimize::before { + content: "\f78c"; } + +.fa-compress-arrows-alt::before { + content: "\f78c"; } + +.fa-monument::before { + content: "\f5a6"; } + +.fa-snowplow::before { + content: "\f7d2"; } + +.fa-angles-right::before { + content: "\f101"; } + +.fa-angle-double-right::before { + content: "\f101"; } + +.fa-cannabis::before { + content: "\f55f"; } + +.fa-circle-play::before { + content: "\f144"; } + +.fa-play-circle::before { + content: "\f144"; } + +.fa-tablets::before { + content: "\f490"; } + +.fa-ethernet::before { + content: "\f796"; } + +.fa-euro-sign::before { + content: "\f153"; } + +.fa-eur::before { + content: "\f153"; } + +.fa-euro::before { + content: "\f153"; } + +.fa-chair::before { + content: "\f6c0"; } + +.fa-circle-check::before { + content: "\f058"; } + +.fa-check-circle::before { + content: "\f058"; } + +.fa-circle-stop::before { + content: "\f28d"; } + +.fa-stop-circle::before { + content: "\f28d"; } + +.fa-compass-drafting::before { + content: "\f568"; } + +.fa-drafting-compass::before { + content: "\f568"; } + +.fa-plate-wheat::before { + content: "\e55a"; } + +.fa-icicles::before { + 
content: "\f7ad"; } + +.fa-person-shelter::before { + content: "\e54f"; } + +.fa-neuter::before { + content: "\f22c"; } + +.fa-id-badge::before { + content: "\f2c1"; } + +.fa-marker::before { + content: "\f5a1"; } + +.fa-face-laugh-beam::before { + content: "\f59a"; } + +.fa-laugh-beam::before { + content: "\f59a"; } + +.fa-helicopter-symbol::before { + content: "\e502"; } + +.fa-universal-access::before { + content: "\f29a"; } + +.fa-circle-chevron-up::before { + content: "\f139"; } + +.fa-chevron-circle-up::before { + content: "\f139"; } + +.fa-lari-sign::before { + content: "\e1c8"; } + +.fa-volcano::before { + content: "\f770"; } + +.fa-person-walking-dashed-line-arrow-right::before { + content: "\e553"; } + +.fa-sterling-sign::before { + content: "\f154"; } + +.fa-gbp::before { + content: "\f154"; } + +.fa-pound-sign::before { + content: "\f154"; } + +.fa-viruses::before { + content: "\e076"; } + +.fa-square-person-confined::before { + content: "\e577"; } + +.fa-user-tie::before { + content: "\f508"; } + +.fa-arrow-down-long::before { + content: "\f175"; } + +.fa-long-arrow-down::before { + content: "\f175"; } + +.fa-tent-arrow-down-to-line::before { + content: "\e57e"; } + +.fa-certificate::before { + content: "\f0a3"; } + +.fa-reply-all::before { + content: "\f122"; } + +.fa-mail-reply-all::before { + content: "\f122"; } + +.fa-suitcase::before { + content: "\f0f2"; } + +.fa-person-skating::before { + content: "\f7c5"; } + +.fa-skating::before { + content: "\f7c5"; } + +.fa-filter-circle-dollar::before { + content: "\f662"; } + +.fa-funnel-dollar::before { + content: "\f662"; } + +.fa-camera-retro::before { + content: "\f083"; } + +.fa-circle-arrow-down::before { + content: "\f0ab"; } + +.fa-arrow-circle-down::before { + content: "\f0ab"; } + +.fa-file-import::before { + content: "\f56f"; } + +.fa-arrow-right-to-file::before { + content: "\f56f"; } + +.fa-square-arrow-up-right::before { + content: "\f14c"; } + +.fa-external-link-square::before { + content: 
"\f14c"; } + +.fa-box-open::before { + content: "\f49e"; } + +.fa-scroll::before { + content: "\f70e"; } + +.fa-spa::before { + content: "\f5bb"; } + +.fa-location-pin-lock::before { + content: "\e51f"; } + +.fa-pause::before { + content: "\f04c"; } + +.fa-hill-avalanche::before { + content: "\e507"; } + +.fa-temperature-empty::before { + content: "\f2cb"; } + +.fa-temperature-0::before { + content: "\f2cb"; } + +.fa-thermometer-0::before { + content: "\f2cb"; } + +.fa-thermometer-empty::before { + content: "\f2cb"; } + +.fa-bomb::before { + content: "\f1e2"; } + +.fa-registered::before { + content: "\f25d"; } + +.fa-address-card::before { + content: "\f2bb"; } + +.fa-contact-card::before { + content: "\f2bb"; } + +.fa-vcard::before { + content: "\f2bb"; } + +.fa-scale-unbalanced-flip::before { + content: "\f516"; } + +.fa-balance-scale-right::before { + content: "\f516"; } + +.fa-subscript::before { + content: "\f12c"; } + +.fa-diamond-turn-right::before { + content: "\f5eb"; } + +.fa-directions::before { + content: "\f5eb"; } + +.fa-burst::before { + content: "\e4dc"; } + +.fa-house-laptop::before { + content: "\e066"; } + +.fa-laptop-house::before { + content: "\e066"; } + +.fa-face-tired::before { + content: "\f5c8"; } + +.fa-tired::before { + content: "\f5c8"; } + +.fa-money-bills::before { + content: "\e1f3"; } + +.fa-smog::before { + content: "\f75f"; } + +.fa-crutch::before { + content: "\f7f7"; } + +.fa-cloud-arrow-up::before { + content: "\f0ee"; } + +.fa-cloud-upload::before { + content: "\f0ee"; } + +.fa-cloud-upload-alt::before { + content: "\f0ee"; } + +.fa-palette::before { + content: "\f53f"; } + +.fa-arrows-turn-right::before { + content: "\e4c0"; } + +.fa-vest::before { + content: "\e085"; } + +.fa-ferry::before { + content: "\e4ea"; } + +.fa-arrows-down-to-people::before { + content: "\e4b9"; } + +.fa-seedling::before { + content: "\f4d8"; } + +.fa-sprout::before { + content: "\f4d8"; } + +.fa-left-right::before { + content: "\f337"; } + 
+.fa-arrows-alt-h::before { + content: "\f337"; } + +.fa-boxes-packing::before { + content: "\e4c7"; } + +.fa-circle-arrow-left::before { + content: "\f0a8"; } + +.fa-arrow-circle-left::before { + content: "\f0a8"; } + +.fa-group-arrows-rotate::before { + content: "\e4f6"; } + +.fa-bowl-food::before { + content: "\e4c6"; } + +.fa-candy-cane::before { + content: "\f786"; } + +.fa-arrow-down-wide-short::before { + content: "\f160"; } + +.fa-sort-amount-asc::before { + content: "\f160"; } + +.fa-sort-amount-down::before { + content: "\f160"; } + +.fa-cloud-bolt::before { + content: "\f76c"; } + +.fa-thunderstorm::before { + content: "\f76c"; } + +.fa-text-slash::before { + content: "\f87d"; } + +.fa-remove-format::before { + content: "\f87d"; } + +.fa-face-smile-wink::before { + content: "\f4da"; } + +.fa-smile-wink::before { + content: "\f4da"; } + +.fa-file-word::before { + content: "\f1c2"; } + +.fa-file-powerpoint::before { + content: "\f1c4"; } + +.fa-arrows-left-right::before { + content: "\f07e"; } + +.fa-arrows-h::before { + content: "\f07e"; } + +.fa-house-lock::before { + content: "\e510"; } + +.fa-cloud-arrow-down::before { + content: "\f0ed"; } + +.fa-cloud-download::before { + content: "\f0ed"; } + +.fa-cloud-download-alt::before { + content: "\f0ed"; } + +.fa-children::before { + content: "\e4e1"; } + +.fa-chalkboard::before { + content: "\f51b"; } + +.fa-blackboard::before { + content: "\f51b"; } + +.fa-user-large-slash::before { + content: "\f4fa"; } + +.fa-user-alt-slash::before { + content: "\f4fa"; } + +.fa-envelope-open::before { + content: "\f2b6"; } + +.fa-handshake-simple-slash::before { + content: "\e05f"; } + +.fa-handshake-alt-slash::before { + content: "\e05f"; } + +.fa-mattress-pillow::before { + content: "\e525"; } + +.fa-guarani-sign::before { + content: "\e19a"; } + +.fa-arrows-rotate::before { + content: "\f021"; } + +.fa-refresh::before { + content: "\f021"; } + +.fa-sync::before { + content: "\f021"; } + +.fa-fire-extinguisher::before 
{ + content: "\f134"; } + +.fa-cruzeiro-sign::before { + content: "\e152"; } + +.fa-greater-than-equal::before { + content: "\f532"; } + +.fa-shield-halved::before { + content: "\f3ed"; } + +.fa-shield-alt::before { + content: "\f3ed"; } + +.fa-book-atlas::before { + content: "\f558"; } + +.fa-atlas::before { + content: "\f558"; } + +.fa-virus::before { + content: "\e074"; } + +.fa-envelope-circle-check::before { + content: "\e4e8"; } + +.fa-layer-group::before { + content: "\f5fd"; } + +.fa-arrows-to-dot::before { + content: "\e4be"; } + +.fa-archway::before { + content: "\f557"; } + +.fa-heart-circle-check::before { + content: "\e4fd"; } + +.fa-house-chimney-crack::before { + content: "\f6f1"; } + +.fa-house-damage::before { + content: "\f6f1"; } + +.fa-file-zipper::before { + content: "\f1c6"; } + +.fa-file-archive::before { + content: "\f1c6"; } + +.fa-square::before { + content: "\f0c8"; } + +.fa-martini-glass-empty::before { + content: "\f000"; } + +.fa-glass-martini::before { + content: "\f000"; } + +.fa-couch::before { + content: "\f4b8"; } + +.fa-cedi-sign::before { + content: "\e0df"; } + +.fa-italic::before { + content: "\f033"; } + +.fa-church::before { + content: "\f51d"; } + +.fa-comments-dollar::before { + content: "\f653"; } + +.fa-democrat::before { + content: "\f747"; } + +.fa-z::before { + content: "\5a"; } + +.fa-person-skiing::before { + content: "\f7c9"; } + +.fa-skiing::before { + content: "\f7c9"; } + +.fa-road-lock::before { + content: "\e567"; } + +.fa-a::before { + content: "\41"; } + +.fa-temperature-arrow-down::before { + content: "\e03f"; } + +.fa-temperature-down::before { + content: "\e03f"; } + +.fa-feather-pointed::before { + content: "\f56b"; } + +.fa-feather-alt::before { + content: "\f56b"; } + +.fa-p::before { + content: "\50"; } + +.fa-snowflake::before { + content: "\f2dc"; } + +.fa-newspaper::before { + content: "\f1ea"; } + +.fa-rectangle-ad::before { + content: "\f641"; } + +.fa-ad::before { + content: "\f641"; } + 
+.fa-circle-arrow-right::before { + content: "\f0a9"; } + +.fa-arrow-circle-right::before { + content: "\f0a9"; } + +.fa-filter-circle-xmark::before { + content: "\e17b"; } + +.fa-locust::before { + content: "\e520"; } + +.fa-sort::before { + content: "\f0dc"; } + +.fa-unsorted::before { + content: "\f0dc"; } + +.fa-list-ol::before { + content: "\f0cb"; } + +.fa-list-1-2::before { + content: "\f0cb"; } + +.fa-list-numeric::before { + content: "\f0cb"; } + +.fa-person-dress-burst::before { + content: "\e544"; } + +.fa-money-check-dollar::before { + content: "\f53d"; } + +.fa-money-check-alt::before { + content: "\f53d"; } + +.fa-vector-square::before { + content: "\f5cb"; } + +.fa-bread-slice::before { + content: "\f7ec"; } + +.fa-language::before { + content: "\f1ab"; } + +.fa-face-kiss-wink-heart::before { + content: "\f598"; } + +.fa-kiss-wink-heart::before { + content: "\f598"; } + +.fa-filter::before { + content: "\f0b0"; } + +.fa-question::before { + content: "\3f"; } + +.fa-file-signature::before { + content: "\f573"; } + +.fa-up-down-left-right::before { + content: "\f0b2"; } + +.fa-arrows-alt::before { + content: "\f0b2"; } + +.fa-house-chimney-user::before { + content: "\e065"; } + +.fa-hand-holding-heart::before { + content: "\f4be"; } + +.fa-puzzle-piece::before { + content: "\f12e"; } + +.fa-money-check::before { + content: "\f53c"; } + +.fa-star-half-stroke::before { + content: "\f5c0"; } + +.fa-star-half-alt::before { + content: "\f5c0"; } + +.fa-code::before { + content: "\f121"; } + +.fa-whiskey-glass::before { + content: "\f7a0"; } + +.fa-glass-whiskey::before { + content: "\f7a0"; } + +.fa-building-circle-exclamation::before { + content: "\e4d3"; } + +.fa-magnifying-glass-chart::before { + content: "\e522"; } + +.fa-arrow-up-right-from-square::before { + content: "\f08e"; } + +.fa-external-link::before { + content: "\f08e"; } + +.fa-cubes-stacked::before { + content: "\e4e6"; } + +.fa-won-sign::before { + content: "\f159"; } + +.fa-krw::before { + 
content: "\f159"; } + +.fa-won::before { + content: "\f159"; } + +.fa-virus-covid::before { + content: "\e4a8"; } + +.fa-austral-sign::before { + content: "\e0a9"; } + +.fa-f::before { + content: "\46"; } + +.fa-leaf::before { + content: "\f06c"; } + +.fa-road::before { + content: "\f018"; } + +.fa-taxi::before { + content: "\f1ba"; } + +.fa-cab::before { + content: "\f1ba"; } + +.fa-person-circle-plus::before { + content: "\e541"; } + +.fa-chart-pie::before { + content: "\f200"; } + +.fa-pie-chart::before { + content: "\f200"; } + +.fa-bolt-lightning::before { + content: "\e0b7"; } + +.fa-sack-xmark::before { + content: "\e56a"; } + +.fa-file-excel::before { + content: "\f1c3"; } + +.fa-file-contract::before { + content: "\f56c"; } + +.fa-fish-fins::before { + content: "\e4f2"; } + +.fa-building-flag::before { + content: "\e4d5"; } + +.fa-face-grin-beam::before { + content: "\f582"; } + +.fa-grin-beam::before { + content: "\f582"; } + +.fa-object-ungroup::before { + content: "\f248"; } + +.fa-poop::before { + content: "\f619"; } + +.fa-location-pin::before { + content: "\f041"; } + +.fa-map-marker::before { + content: "\f041"; } + +.fa-kaaba::before { + content: "\f66b"; } + +.fa-toilet-paper::before { + content: "\f71e"; } + +.fa-helmet-safety::before { + content: "\f807"; } + +.fa-hard-hat::before { + content: "\f807"; } + +.fa-hat-hard::before { + content: "\f807"; } + +.fa-eject::before { + content: "\f052"; } + +.fa-circle-right::before { + content: "\f35a"; } + +.fa-arrow-alt-circle-right::before { + content: "\f35a"; } + +.fa-plane-circle-check::before { + content: "\e555"; } + +.fa-face-rolling-eyes::before { + content: "\f5a5"; } + +.fa-meh-rolling-eyes::before { + content: "\f5a5"; } + +.fa-object-group::before { + content: "\f247"; } + +.fa-chart-line::before { + content: "\f201"; } + +.fa-line-chart::before { + content: "\f201"; } + +.fa-mask-ventilator::before { + content: "\e524"; } + +.fa-arrow-right::before { + content: "\f061"; } + 
+.fa-signs-post::before { + content: "\f277"; } + +.fa-map-signs::before { + content: "\f277"; } + +.fa-cash-register::before { + content: "\f788"; } + +.fa-person-circle-question::before { + content: "\e542"; } + +.fa-h::before { + content: "\48"; } + +.fa-tarp::before { + content: "\e57b"; } + +.fa-screwdriver-wrench::before { + content: "\f7d9"; } + +.fa-tools::before { + content: "\f7d9"; } + +.fa-arrows-to-eye::before { + content: "\e4bf"; } + +.fa-plug-circle-bolt::before { + content: "\e55b"; } + +.fa-heart::before { + content: "\f004"; } + +.fa-mars-and-venus::before { + content: "\f224"; } + +.fa-house-user::before { + content: "\e1b0"; } + +.fa-home-user::before { + content: "\e1b0"; } + +.fa-dumpster-fire::before { + content: "\f794"; } + +.fa-house-crack::before { + content: "\e3b1"; } + +.fa-martini-glass-citrus::before { + content: "\f561"; } + +.fa-cocktail::before { + content: "\f561"; } + +.fa-face-surprise::before { + content: "\f5c2"; } + +.fa-surprise::before { + content: "\f5c2"; } + +.fa-bottle-water::before { + content: "\e4c5"; } + +.fa-circle-pause::before { + content: "\f28b"; } + +.fa-pause-circle::before { + content: "\f28b"; } + +.fa-toilet-paper-slash::before { + content: "\e072"; } + +.fa-apple-whole::before { + content: "\f5d1"; } + +.fa-apple-alt::before { + content: "\f5d1"; } + +.fa-kitchen-set::before { + content: "\e51a"; } + +.fa-r::before { + content: "\52"; } + +.fa-temperature-quarter::before { + content: "\f2ca"; } + +.fa-temperature-1::before { + content: "\f2ca"; } + +.fa-thermometer-1::before { + content: "\f2ca"; } + +.fa-thermometer-quarter::before { + content: "\f2ca"; } + +.fa-cube::before { + content: "\f1b2"; } + +.fa-bitcoin-sign::before { + content: "\e0b4"; } + +.fa-shield-dog::before { + content: "\e573"; } + +.fa-solar-panel::before { + content: "\f5ba"; } + +.fa-lock-open::before { + content: "\f3c1"; } + +.fa-elevator::before { + content: "\e16d"; } + +.fa-money-bill-transfer::before { + content: "\e528"; } 
+ +.fa-money-bill-trend-up::before { + content: "\e529"; } + +.fa-house-flood-water-circle-arrow-right::before { + content: "\e50f"; } + +.fa-square-poll-horizontal::before { + content: "\f682"; } + +.fa-poll-h::before { + content: "\f682"; } + +.fa-circle::before { + content: "\f111"; } + +.fa-backward-fast::before { + content: "\f049"; } + +.fa-fast-backward::before { + content: "\f049"; } + +.fa-recycle::before { + content: "\f1b8"; } + +.fa-user-astronaut::before { + content: "\f4fb"; } + +.fa-plane-slash::before { + content: "\e069"; } + +.fa-trademark::before { + content: "\f25c"; } + +.fa-basketball::before { + content: "\f434"; } + +.fa-basketball-ball::before { + content: "\f434"; } + +.fa-satellite-dish::before { + content: "\f7c0"; } + +.fa-circle-up::before { + content: "\f35b"; } + +.fa-arrow-alt-circle-up::before { + content: "\f35b"; } + +.fa-mobile-screen-button::before { + content: "\f3cd"; } + +.fa-mobile-alt::before { + content: "\f3cd"; } + +.fa-volume-high::before { + content: "\f028"; } + +.fa-volume-up::before { + content: "\f028"; } + +.fa-users-rays::before { + content: "\e593"; } + +.fa-wallet::before { + content: "\f555"; } + +.fa-clipboard-check::before { + content: "\f46c"; } + +.fa-file-audio::before { + content: "\f1c7"; } + +.fa-burger::before { + content: "\f805"; } + +.fa-hamburger::before { + content: "\f805"; } + +.fa-wrench::before { + content: "\f0ad"; } + +.fa-bugs::before { + content: "\e4d0"; } + +.fa-rupee-sign::before { + content: "\f156"; } + +.fa-rupee::before { + content: "\f156"; } + +.fa-file-image::before { + content: "\f1c5"; } + +.fa-circle-question::before { + content: "\f059"; } + +.fa-question-circle::before { + content: "\f059"; } + +.fa-plane-departure::before { + content: "\f5b0"; } + +.fa-handshake-slash::before { + content: "\e060"; } + +.fa-book-bookmark::before { + content: "\e0bb"; } + +.fa-code-branch::before { + content: "\f126"; } + +.fa-hat-cowboy::before { + content: "\f8c0"; } + +.fa-bridge::before 
{ + content: "\e4c8"; } + +.fa-phone-flip::before { + content: "\f879"; } + +.fa-phone-alt::before { + content: "\f879"; } + +.fa-truck-front::before { + content: "\e2b7"; } + +.fa-cat::before { + content: "\f6be"; } + +.fa-anchor-circle-exclamation::before { + content: "\e4ab"; } + +.fa-truck-field::before { + content: "\e58d"; } + +.fa-route::before { + content: "\f4d7"; } + +.fa-clipboard-question::before { + content: "\e4e3"; } + +.fa-panorama::before { + content: "\e209"; } + +.fa-comment-medical::before { + content: "\f7f5"; } + +.fa-teeth-open::before { + content: "\f62f"; } + +.fa-file-circle-minus::before { + content: "\e4ed"; } + +.fa-tags::before { + content: "\f02c"; } + +.fa-wine-glass::before { + content: "\f4e3"; } + +.fa-forward-fast::before { + content: "\f050"; } + +.fa-fast-forward::before { + content: "\f050"; } + +.fa-face-meh-blank::before { + content: "\f5a4"; } + +.fa-meh-blank::before { + content: "\f5a4"; } + +.fa-square-parking::before { + content: "\f540"; } + +.fa-parking::before { + content: "\f540"; } + +.fa-house-signal::before { + content: "\e012"; } + +.fa-bars-progress::before { + content: "\f828"; } + +.fa-tasks-alt::before { + content: "\f828"; } + +.fa-faucet-drip::before { + content: "\e006"; } + +.fa-cart-flatbed::before { + content: "\f474"; } + +.fa-dolly-flatbed::before { + content: "\f474"; } + +.fa-ban-smoking::before { + content: "\f54d"; } + +.fa-smoking-ban::before { + content: "\f54d"; } + +.fa-terminal::before { + content: "\f120"; } + +.fa-mobile-button::before { + content: "\f10b"; } + +.fa-house-medical-flag::before { + content: "\e514"; } + +.fa-basket-shopping::before { + content: "\f291"; } + +.fa-shopping-basket::before { + content: "\f291"; } + +.fa-tape::before { + content: "\f4db"; } + +.fa-bus-simple::before { + content: "\f55e"; } + +.fa-bus-alt::before { + content: "\f55e"; } + +.fa-eye::before { + content: "\f06e"; } + +.fa-face-sad-cry::before { + content: "\f5b3"; } + +.fa-sad-cry::before { + 
content: "\f5b3"; } + +.fa-audio-description::before { + content: "\f29e"; } + +.fa-person-military-to-person::before { + content: "\e54c"; } + +.fa-file-shield::before { + content: "\e4f0"; } + +.fa-user-slash::before { + content: "\f506"; } + +.fa-pen::before { + content: "\f304"; } + +.fa-tower-observation::before { + content: "\e586"; } + +.fa-file-code::before { + content: "\f1c9"; } + +.fa-signal::before { + content: "\f012"; } + +.fa-signal-5::before { + content: "\f012"; } + +.fa-signal-perfect::before { + content: "\f012"; } + +.fa-bus::before { + content: "\f207"; } + +.fa-heart-circle-xmark::before { + content: "\e501"; } + +.fa-house-chimney::before { + content: "\e3af"; } + +.fa-home-lg::before { + content: "\e3af"; } + +.fa-window-maximize::before { + content: "\f2d0"; } + +.fa-face-frown::before { + content: "\f119"; } + +.fa-frown::before { + content: "\f119"; } + +.fa-prescription::before { + content: "\f5b1"; } + +.fa-shop::before { + content: "\f54f"; } + +.fa-store-alt::before { + content: "\f54f"; } + +.fa-floppy-disk::before { + content: "\f0c7"; } + +.fa-save::before { + content: "\f0c7"; } + +.fa-vihara::before { + content: "\f6a7"; } + +.fa-scale-unbalanced::before { + content: "\f515"; } + +.fa-balance-scale-left::before { + content: "\f515"; } + +.fa-sort-up::before { + content: "\f0de"; } + +.fa-sort-asc::before { + content: "\f0de"; } + +.fa-comment-dots::before { + content: "\f4ad"; } + +.fa-commenting::before { + content: "\f4ad"; } + +.fa-plant-wilt::before { + content: "\e5aa"; } + +.fa-diamond::before { + content: "\f219"; } + +.fa-face-grin-squint::before { + content: "\f585"; } + +.fa-grin-squint::before { + content: "\f585"; } + +.fa-hand-holding-dollar::before { + content: "\f4c0"; } + +.fa-hand-holding-usd::before { + content: "\f4c0"; } + +.fa-bacterium::before { + content: "\e05a"; } + +.fa-hand-pointer::before { + content: "\f25a"; } + +.fa-drum-steelpan::before { + content: "\f56a"; } + +.fa-hand-scissors::before { + 
content: "\f257"; } + +.fa-hands-praying::before { + content: "\f684"; } + +.fa-praying-hands::before { + content: "\f684"; } + +.fa-arrow-rotate-right::before { + content: "\f01e"; } + +.fa-arrow-right-rotate::before { + content: "\f01e"; } + +.fa-arrow-rotate-forward::before { + content: "\f01e"; } + +.fa-redo::before { + content: "\f01e"; } + +.fa-biohazard::before { + content: "\f780"; } + +.fa-location-crosshairs::before { + content: "\f601"; } + +.fa-location::before { + content: "\f601"; } + +.fa-mars-double::before { + content: "\f227"; } + +.fa-child-dress::before { + content: "\e59c"; } + +.fa-users-between-lines::before { + content: "\e591"; } + +.fa-lungs-virus::before { + content: "\e067"; } + +.fa-face-grin-tears::before { + content: "\f588"; } + +.fa-grin-tears::before { + content: "\f588"; } + +.fa-phone::before { + content: "\f095"; } + +.fa-calendar-xmark::before { + content: "\f273"; } + +.fa-calendar-times::before { + content: "\f273"; } + +.fa-child-reaching::before { + content: "\e59d"; } + +.fa-head-side-virus::before { + content: "\e064"; } + +.fa-user-gear::before { + content: "\f4fe"; } + +.fa-user-cog::before { + content: "\f4fe"; } + +.fa-arrow-up-1-9::before { + content: "\f163"; } + +.fa-sort-numeric-up::before { + content: "\f163"; } + +.fa-door-closed::before { + content: "\f52a"; } + +.fa-shield-virus::before { + content: "\e06c"; } + +.fa-dice-six::before { + content: "\f526"; } + +.fa-mosquito-net::before { + content: "\e52c"; } + +.fa-bridge-water::before { + content: "\e4ce"; } + +.fa-person-booth::before { + content: "\f756"; } + +.fa-text-width::before { + content: "\f035"; } + +.fa-hat-wizard::before { + content: "\f6e8"; } + +.fa-pen-fancy::before { + content: "\f5ac"; } + +.fa-person-digging::before { + content: "\f85e"; } + +.fa-digging::before { + content: "\f85e"; } + +.fa-trash::before { + content: "\f1f8"; } + +.fa-gauge-simple::before { + content: "\f629"; } + +.fa-gauge-simple-med::before { + content: "\f629"; } + 
+.fa-tachometer-average::before { + content: "\f629"; } + +.fa-book-medical::before { + content: "\f7e6"; } + +.fa-poo::before { + content: "\f2fe"; } + +.fa-quote-right::before { + content: "\f10e"; } + +.fa-quote-right-alt::before { + content: "\f10e"; } + +.fa-shirt::before { + content: "\f553"; } + +.fa-t-shirt::before { + content: "\f553"; } + +.fa-tshirt::before { + content: "\f553"; } + +.fa-cubes::before { + content: "\f1b3"; } + +.fa-divide::before { + content: "\f529"; } + +.fa-tenge-sign::before { + content: "\f7d7"; } + +.fa-tenge::before { + content: "\f7d7"; } + +.fa-headphones::before { + content: "\f025"; } + +.fa-hands-holding::before { + content: "\f4c2"; } + +.fa-hands-clapping::before { + content: "\e1a8"; } + +.fa-republican::before { + content: "\f75e"; } + +.fa-arrow-left::before { + content: "\f060"; } + +.fa-person-circle-xmark::before { + content: "\e543"; } + +.fa-ruler::before { + content: "\f545"; } + +.fa-align-left::before { + content: "\f036"; } + +.fa-dice-d6::before { + content: "\f6d1"; } + +.fa-restroom::before { + content: "\f7bd"; } + +.fa-j::before { + content: "\4a"; } + +.fa-users-viewfinder::before { + content: "\e595"; } + +.fa-file-video::before { + content: "\f1c8"; } + +.fa-up-right-from-square::before { + content: "\f35d"; } + +.fa-external-link-alt::before { + content: "\f35d"; } + +.fa-table-cells::before { + content: "\f00a"; } + +.fa-th::before { + content: "\f00a"; } + +.fa-file-pdf::before { + content: "\f1c1"; } + +.fa-book-bible::before { + content: "\f647"; } + +.fa-bible::before { + content: "\f647"; } + +.fa-o::before { + content: "\4f"; } + +.fa-suitcase-medical::before { + content: "\f0fa"; } + +.fa-medkit::before { + content: "\f0fa"; } + +.fa-user-secret::before { + content: "\f21b"; } + +.fa-otter::before { + content: "\f700"; } + +.fa-person-dress::before { + content: "\f182"; } + +.fa-female::before { + content: "\f182"; } + +.fa-comment-dollar::before { + content: "\f651"; } + 
+.fa-business-time::before { + content: "\f64a"; } + +.fa-briefcase-clock::before { + content: "\f64a"; } + +.fa-table-cells-large::before { + content: "\f009"; } + +.fa-th-large::before { + content: "\f009"; } + +.fa-book-tanakh::before { + content: "\f827"; } + +.fa-tanakh::before { + content: "\f827"; } + +.fa-phone-volume::before { + content: "\f2a0"; } + +.fa-volume-control-phone::before { + content: "\f2a0"; } + +.fa-hat-cowboy-side::before { + content: "\f8c1"; } + +.fa-clipboard-user::before { + content: "\f7f3"; } + +.fa-child::before { + content: "\f1ae"; } + +.fa-lira-sign::before { + content: "\f195"; } + +.fa-satellite::before { + content: "\f7bf"; } + +.fa-plane-lock::before { + content: "\e558"; } + +.fa-tag::before { + content: "\f02b"; } + +.fa-comment::before { + content: "\f075"; } + +.fa-cake-candles::before { + content: "\f1fd"; } + +.fa-birthday-cake::before { + content: "\f1fd"; } + +.fa-cake::before { + content: "\f1fd"; } + +.fa-envelope::before { + content: "\f0e0"; } + +.fa-angles-up::before { + content: "\f102"; } + +.fa-angle-double-up::before { + content: "\f102"; } + +.fa-paperclip::before { + content: "\f0c6"; } + +.fa-arrow-right-to-city::before { + content: "\e4b3"; } + +.fa-ribbon::before { + content: "\f4d6"; } + +.fa-lungs::before { + content: "\f604"; } + +.fa-arrow-up-9-1::before { + content: "\f887"; } + +.fa-sort-numeric-up-alt::before { + content: "\f887"; } + +.fa-litecoin-sign::before { + content: "\e1d3"; } + +.fa-border-none::before { + content: "\f850"; } + +.fa-circle-nodes::before { + content: "\e4e2"; } + +.fa-parachute-box::before { + content: "\f4cd"; } + +.fa-indent::before { + content: "\f03c"; } + +.fa-truck-field-un::before { + content: "\e58e"; } + +.fa-hourglass::before { + content: "\f254"; } + +.fa-hourglass-empty::before { + content: "\f254"; } + +.fa-mountain::before { + content: "\f6fc"; } + +.fa-user-doctor::before { + content: "\f0f0"; } + +.fa-user-md::before { + content: "\f0f0"; } + 
+.fa-circle-info::before { + content: "\f05a"; } + +.fa-info-circle::before { + content: "\f05a"; } + +.fa-cloud-meatball::before { + content: "\f73b"; } + +.fa-camera::before { + content: "\f030"; } + +.fa-camera-alt::before { + content: "\f030"; } + +.fa-square-virus::before { + content: "\e578"; } + +.fa-meteor::before { + content: "\f753"; } + +.fa-car-on::before { + content: "\e4dd"; } + +.fa-sleigh::before { + content: "\f7cc"; } + +.fa-arrow-down-1-9::before { + content: "\f162"; } + +.fa-sort-numeric-asc::before { + content: "\f162"; } + +.fa-sort-numeric-down::before { + content: "\f162"; } + +.fa-hand-holding-droplet::before { + content: "\f4c1"; } + +.fa-hand-holding-water::before { + content: "\f4c1"; } + +.fa-water::before { + content: "\f773"; } + +.fa-calendar-check::before { + content: "\f274"; } + +.fa-braille::before { + content: "\f2a1"; } + +.fa-prescription-bottle-medical::before { + content: "\f486"; } + +.fa-prescription-bottle-alt::before { + content: "\f486"; } + +.fa-landmark::before { + content: "\f66f"; } + +.fa-truck::before { + content: "\f0d1"; } + +.fa-crosshairs::before { + content: "\f05b"; } + +.fa-person-cane::before { + content: "\e53c"; } + +.fa-tent::before { + content: "\e57d"; } + +.fa-vest-patches::before { + content: "\e086"; } + +.fa-check-double::before { + content: "\f560"; } + +.fa-arrow-down-a-z::before { + content: "\f15d"; } + +.fa-sort-alpha-asc::before { + content: "\f15d"; } + +.fa-sort-alpha-down::before { + content: "\f15d"; } + +.fa-money-bill-wheat::before { + content: "\e52a"; } + +.fa-cookie::before { + content: "\f563"; } + +.fa-arrow-rotate-left::before { + content: "\f0e2"; } + +.fa-arrow-left-rotate::before { + content: "\f0e2"; } + +.fa-arrow-rotate-back::before { + content: "\f0e2"; } + +.fa-arrow-rotate-backward::before { + content: "\f0e2"; } + +.fa-undo::before { + content: "\f0e2"; } + +.fa-hard-drive::before { + content: "\f0a0"; } + +.fa-hdd::before { + content: "\f0a0"; } + 
+.fa-face-grin-squint-tears::before { + content: "\f586"; } + +.fa-grin-squint-tears::before { + content: "\f586"; } + +.fa-dumbbell::before { + content: "\f44b"; } + +.fa-rectangle-list::before { + content: "\f022"; } + +.fa-list-alt::before { + content: "\f022"; } + +.fa-tarp-droplet::before { + content: "\e57c"; } + +.fa-house-medical-circle-check::before { + content: "\e511"; } + +.fa-person-skiing-nordic::before { + content: "\f7ca"; } + +.fa-skiing-nordic::before { + content: "\f7ca"; } + +.fa-calendar-plus::before { + content: "\f271"; } + +.fa-plane-arrival::before { + content: "\f5af"; } + +.fa-circle-left::before { + content: "\f359"; } + +.fa-arrow-alt-circle-left::before { + content: "\f359"; } + +.fa-train-subway::before { + content: "\f239"; } + +.fa-subway::before { + content: "\f239"; } + +.fa-chart-gantt::before { + content: "\e0e4"; } + +.fa-indian-rupee-sign::before { + content: "\e1bc"; } + +.fa-indian-rupee::before { + content: "\e1bc"; } + +.fa-inr::before { + content: "\e1bc"; } + +.fa-crop-simple::before { + content: "\f565"; } + +.fa-crop-alt::before { + content: "\f565"; } + +.fa-money-bill-1::before { + content: "\f3d1"; } + +.fa-money-bill-alt::before { + content: "\f3d1"; } + +.fa-left-long::before { + content: "\f30a"; } + +.fa-long-arrow-alt-left::before { + content: "\f30a"; } + +.fa-dna::before { + content: "\f471"; } + +.fa-virus-slash::before { + content: "\e075"; } + +.fa-minus::before { + content: "\f068"; } + +.fa-subtract::before { + content: "\f068"; } + +.fa-chess::before { + content: "\f439"; } + +.fa-arrow-left-long::before { + content: "\f177"; } + +.fa-long-arrow-left::before { + content: "\f177"; } + +.fa-plug-circle-check::before { + content: "\e55c"; } + +.fa-street-view::before { + content: "\f21d"; } + +.fa-franc-sign::before { + content: "\e18f"; } + +.fa-volume-off::before { + content: "\f026"; } + +.fa-hands-asl-interpreting::before { + content: "\f2a3"; } + +.fa-american-sign-language-interpreting::before { + 
content: "\f2a3"; } + +.fa-asl-interpreting::before { + content: "\f2a3"; } + +.fa-hands-american-sign-language-interpreting::before { + content: "\f2a3"; } + +.fa-gear::before { + content: "\f013"; } + +.fa-cog::before { + content: "\f013"; } + +.fa-droplet-slash::before { + content: "\f5c7"; } + +.fa-tint-slash::before { + content: "\f5c7"; } + +.fa-mosque::before { + content: "\f678"; } + +.fa-mosquito::before { + content: "\e52b"; } + +.fa-star-of-david::before { + content: "\f69a"; } + +.fa-person-military-rifle::before { + content: "\e54b"; } + +.fa-cart-shopping::before { + content: "\f07a"; } + +.fa-shopping-cart::before { + content: "\f07a"; } + +.fa-vials::before { + content: "\f493"; } + +.fa-plug-circle-plus::before { + content: "\e55f"; } + +.fa-place-of-worship::before { + content: "\f67f"; } + +.fa-grip-vertical::before { + content: "\f58e"; } + +.fa-arrow-turn-up::before { + content: "\f148"; } + +.fa-level-up::before { + content: "\f148"; } + +.fa-u::before { + content: "\55"; } + +.fa-square-root-variable::before { + content: "\f698"; } + +.fa-square-root-alt::before { + content: "\f698"; } + +.fa-clock::before { + content: "\f017"; } + +.fa-clock-four::before { + content: "\f017"; } + +.fa-backward-step::before { + content: "\f048"; } + +.fa-step-backward::before { + content: "\f048"; } + +.fa-pallet::before { + content: "\f482"; } + +.fa-faucet::before { + content: "\e005"; } + +.fa-baseball-bat-ball::before { + content: "\f432"; } + +.fa-s::before { + content: "\53"; } + +.fa-timeline::before { + content: "\e29c"; } + +.fa-keyboard::before { + content: "\f11c"; } + +.fa-caret-down::before { + content: "\f0d7"; } + +.fa-house-chimney-medical::before { + content: "\f7f2"; } + +.fa-clinic-medical::before { + content: "\f7f2"; } + +.fa-temperature-three-quarters::before { + content: "\f2c8"; } + +.fa-temperature-3::before { + content: "\f2c8"; } + +.fa-thermometer-3::before { + content: "\f2c8"; } + +.fa-thermometer-three-quarters::before { + 
content: "\f2c8"; } + +.fa-mobile-screen::before { + content: "\f3cf"; } + +.fa-mobile-android-alt::before { + content: "\f3cf"; } + +.fa-plane-up::before { + content: "\e22d"; } + +.fa-piggy-bank::before { + content: "\f4d3"; } + +.fa-battery-half::before { + content: "\f242"; } + +.fa-battery-3::before { + content: "\f242"; } + +.fa-mountain-city::before { + content: "\e52e"; } + +.fa-coins::before { + content: "\f51e"; } + +.fa-khanda::before { + content: "\f66d"; } + +.fa-sliders::before { + content: "\f1de"; } + +.fa-sliders-h::before { + content: "\f1de"; } + +.fa-folder-tree::before { + content: "\f802"; } + +.fa-network-wired::before { + content: "\f6ff"; } + +.fa-map-pin::before { + content: "\f276"; } + +.fa-hamsa::before { + content: "\f665"; } + +.fa-cent-sign::before { + content: "\e3f5"; } + +.fa-flask::before { + content: "\f0c3"; } + +.fa-person-pregnant::before { + content: "\e31e"; } + +.fa-wand-sparkles::before { + content: "\f72b"; } + +.fa-ellipsis-vertical::before { + content: "\f142"; } + +.fa-ellipsis-v::before { + content: "\f142"; } + +.fa-ticket::before { + content: "\f145"; } + +.fa-power-off::before { + content: "\f011"; } + +.fa-right-long::before { + content: "\f30b"; } + +.fa-long-arrow-alt-right::before { + content: "\f30b"; } + +.fa-flag-usa::before { + content: "\f74d"; } + +.fa-laptop-file::before { + content: "\e51d"; } + +.fa-tty::before { + content: "\f1e4"; } + +.fa-teletype::before { + content: "\f1e4"; } + +.fa-diagram-next::before { + content: "\e476"; } + +.fa-person-rifle::before { + content: "\e54e"; } + +.fa-house-medical-circle-exclamation::before { + content: "\e512"; } + +.fa-closed-captioning::before { + content: "\f20a"; } + +.fa-person-hiking::before { + content: "\f6ec"; } + +.fa-hiking::before { + content: "\f6ec"; } + +.fa-venus-double::before { + content: "\f226"; } + +.fa-images::before { + content: "\f302"; } + +.fa-calculator::before { + content: "\f1ec"; } + +.fa-people-pulling::before { + content: 
"\e535"; } + +.fa-n::before { + content: "\4e"; } + +.fa-cable-car::before { + content: "\f7da"; } + +.fa-tram::before { + content: "\f7da"; } + +.fa-cloud-rain::before { + content: "\f73d"; } + +.fa-building-circle-xmark::before { + content: "\e4d4"; } + +.fa-ship::before { + content: "\f21a"; } + +.fa-arrows-down-to-line::before { + content: "\e4b8"; } + +.fa-download::before { + content: "\f019"; } + +.fa-face-grin::before { + content: "\f580"; } + +.fa-grin::before { + content: "\f580"; } + +.fa-delete-left::before { + content: "\f55a"; } + +.fa-backspace::before { + content: "\f55a"; } + +.fa-eye-dropper::before { + content: "\f1fb"; } + +.fa-eye-dropper-empty::before { + content: "\f1fb"; } + +.fa-eyedropper::before { + content: "\f1fb"; } + +.fa-file-circle-check::before { + content: "\e5a0"; } + +.fa-forward::before { + content: "\f04e"; } + +.fa-mobile::before { + content: "\f3ce"; } + +.fa-mobile-android::before { + content: "\f3ce"; } + +.fa-mobile-phone::before { + content: "\f3ce"; } + +.fa-face-meh::before { + content: "\f11a"; } + +.fa-meh::before { + content: "\f11a"; } + +.fa-align-center::before { + content: "\f037"; } + +.fa-book-skull::before { + content: "\f6b7"; } + +.fa-book-dead::before { + content: "\f6b7"; } + +.fa-id-card::before { + content: "\f2c2"; } + +.fa-drivers-license::before { + content: "\f2c2"; } + +.fa-outdent::before { + content: "\f03b"; } + +.fa-dedent::before { + content: "\f03b"; } + +.fa-heart-circle-exclamation::before { + content: "\e4fe"; } + +.fa-house::before { + content: "\f015"; } + +.fa-home::before { + content: "\f015"; } + +.fa-home-alt::before { + content: "\f015"; } + +.fa-home-lg-alt::before { + content: "\f015"; } + +.fa-calendar-week::before { + content: "\f784"; } + +.fa-laptop-medical::before { + content: "\f812"; } + +.fa-b::before { + content: "\42"; } + +.fa-file-medical::before { + content: "\f477"; } + +.fa-dice-one::before { + content: "\f525"; } + +.fa-kiwi-bird::before { + content: "\f535"; } + 
+.fa-arrow-right-arrow-left::before { + content: "\f0ec"; } + +.fa-exchange::before { + content: "\f0ec"; } + +.fa-rotate-right::before { + content: "\f2f9"; } + +.fa-redo-alt::before { + content: "\f2f9"; } + +.fa-rotate-forward::before { + content: "\f2f9"; } + +.fa-utensils::before { + content: "\f2e7"; } + +.fa-cutlery::before { + content: "\f2e7"; } + +.fa-arrow-up-wide-short::before { + content: "\f161"; } + +.fa-sort-amount-up::before { + content: "\f161"; } + +.fa-mill-sign::before { + content: "\e1ed"; } + +.fa-bowl-rice::before { + content: "\e2eb"; } + +.fa-skull::before { + content: "\f54c"; } + +.fa-tower-broadcast::before { + content: "\f519"; } + +.fa-broadcast-tower::before { + content: "\f519"; } + +.fa-truck-pickup::before { + content: "\f63c"; } + +.fa-up-long::before { + content: "\f30c"; } + +.fa-long-arrow-alt-up::before { + content: "\f30c"; } + +.fa-stop::before { + content: "\f04d"; } + +.fa-code-merge::before { + content: "\f387"; } + +.fa-upload::before { + content: "\f093"; } + +.fa-hurricane::before { + content: "\f751"; } + +.fa-mound::before { + content: "\e52d"; } + +.fa-toilet-portable::before { + content: "\e583"; } + +.fa-compact-disc::before { + content: "\f51f"; } + +.fa-file-arrow-down::before { + content: "\f56d"; } + +.fa-file-download::before { + content: "\f56d"; } + +.fa-caravan::before { + content: "\f8ff"; } + +.fa-shield-cat::before { + content: "\e572"; } + +.fa-bolt::before { + content: "\f0e7"; } + +.fa-zap::before { + content: "\f0e7"; } + +.fa-glass-water::before { + content: "\e4f4"; } + +.fa-oil-well::before { + content: "\e532"; } + +.fa-vault::before { + content: "\e2c5"; } + +.fa-mars::before { + content: "\f222"; } + +.fa-toilet::before { + content: "\f7d8"; } + +.fa-plane-circle-xmark::before { + content: "\e557"; } + +.fa-yen-sign::before { + content: "\f157"; } + +.fa-cny::before { + content: "\f157"; } + +.fa-jpy::before { + content: "\f157"; } + +.fa-rmb::before { + content: "\f157"; } + +.fa-yen::before 
{ + content: "\f157"; } + +.fa-ruble-sign::before { + content: "\f158"; } + +.fa-rouble::before { + content: "\f158"; } + +.fa-rub::before { + content: "\f158"; } + +.fa-ruble::before { + content: "\f158"; } + +.fa-sun::before { + content: "\f185"; } + +.fa-guitar::before { + content: "\f7a6"; } + +.fa-face-laugh-wink::before { + content: "\f59c"; } + +.fa-laugh-wink::before { + content: "\f59c"; } + +.fa-horse-head::before { + content: "\f7ab"; } + +.fa-bore-hole::before { + content: "\e4c3"; } + +.fa-industry::before { + content: "\f275"; } + +.fa-circle-down::before { + content: "\f358"; } + +.fa-arrow-alt-circle-down::before { + content: "\f358"; } + +.fa-arrows-turn-to-dots::before { + content: "\e4c1"; } + +.fa-florin-sign::before { + content: "\e184"; } + +.fa-arrow-down-short-wide::before { + content: "\f884"; } + +.fa-sort-amount-desc::before { + content: "\f884"; } + +.fa-sort-amount-down-alt::before { + content: "\f884"; } + +.fa-less-than::before { + content: "\3c"; } + +.fa-angle-down::before { + content: "\f107"; } + +.fa-car-tunnel::before { + content: "\e4de"; } + +.fa-head-side-cough::before { + content: "\e061"; } + +.fa-grip-lines::before { + content: "\f7a4"; } + +.fa-thumbs-down::before { + content: "\f165"; } + +.fa-user-lock::before { + content: "\f502"; } + +.fa-arrow-right-long::before { + content: "\f178"; } + +.fa-long-arrow-right::before { + content: "\f178"; } + +.fa-anchor-circle-xmark::before { + content: "\e4ac"; } + +.fa-ellipsis::before { + content: "\f141"; } + +.fa-ellipsis-h::before { + content: "\f141"; } + +.fa-chess-pawn::before { + content: "\f443"; } + +.fa-kit-medical::before { + content: "\f479"; } + +.fa-first-aid::before { + content: "\f479"; } + +.fa-person-through-window::before { + content: "\e5a9"; } + +.fa-toolbox::before { + content: "\f552"; } + +.fa-hands-holding-circle::before { + content: "\e4fb"; } + +.fa-bug::before { + content: "\f188"; } + +.fa-credit-card::before { + content: "\f09d"; } + 
+.fa-credit-card-alt::before { + content: "\f09d"; } + +.fa-car::before { + content: "\f1b9"; } + +.fa-automobile::before { + content: "\f1b9"; } + +.fa-hand-holding-hand::before { + content: "\e4f7"; } + +.fa-book-open-reader::before { + content: "\f5da"; } + +.fa-book-reader::before { + content: "\f5da"; } + +.fa-mountain-sun::before { + content: "\e52f"; } + +.fa-arrows-left-right-to-line::before { + content: "\e4ba"; } + +.fa-dice-d20::before { + content: "\f6cf"; } + +.fa-truck-droplet::before { + content: "\e58c"; } + +.fa-file-circle-xmark::before { + content: "\e5a1"; } + +.fa-temperature-arrow-up::before { + content: "\e040"; } + +.fa-temperature-up::before { + content: "\e040"; } + +.fa-medal::before { + content: "\f5a2"; } + +.fa-bed::before { + content: "\f236"; } + +.fa-square-h::before { + content: "\f0fd"; } + +.fa-h-square::before { + content: "\f0fd"; } + +.fa-podcast::before { + content: "\f2ce"; } + +.fa-temperature-full::before { + content: "\f2c7"; } + +.fa-temperature-4::before { + content: "\f2c7"; } + +.fa-thermometer-4::before { + content: "\f2c7"; } + +.fa-thermometer-full::before { + content: "\f2c7"; } + +.fa-bell::before { + content: "\f0f3"; } + +.fa-superscript::before { + content: "\f12b"; } + +.fa-plug-circle-xmark::before { + content: "\e560"; } + +.fa-star-of-life::before { + content: "\f621"; } + +.fa-phone-slash::before { + content: "\f3dd"; } + +.fa-paint-roller::before { + content: "\f5aa"; } + +.fa-handshake-angle::before { + content: "\f4c4"; } + +.fa-hands-helping::before { + content: "\f4c4"; } + +.fa-location-dot::before { + content: "\f3c5"; } + +.fa-map-marker-alt::before { + content: "\f3c5"; } + +.fa-file::before { + content: "\f15b"; } + +.fa-greater-than::before { + content: "\3e"; } + +.fa-person-swimming::before { + content: "\f5c4"; } + +.fa-swimmer::before { + content: "\f5c4"; } + +.fa-arrow-down::before { + content: "\f063"; } + +.fa-droplet::before { + content: "\f043"; } + +.fa-tint::before { + content: 
"\f043"; } + +.fa-eraser::before { + content: "\f12d"; } + +.fa-earth-americas::before { + content: "\f57d"; } + +.fa-earth::before { + content: "\f57d"; } + +.fa-earth-america::before { + content: "\f57d"; } + +.fa-globe-americas::before { + content: "\f57d"; } + +.fa-person-burst::before { + content: "\e53b"; } + +.fa-dove::before { + content: "\f4ba"; } + +.fa-battery-empty::before { + content: "\f244"; } + +.fa-battery-0::before { + content: "\f244"; } + +.fa-socks::before { + content: "\f696"; } + +.fa-inbox::before { + content: "\f01c"; } + +.fa-section::before { + content: "\e447"; } + +.fa-gauge-high::before { + content: "\f625"; } + +.fa-tachometer-alt::before { + content: "\f625"; } + +.fa-tachometer-alt-fast::before { + content: "\f625"; } + +.fa-envelope-open-text::before { + content: "\f658"; } + +.fa-hospital::before { + content: "\f0f8"; } + +.fa-hospital-alt::before { + content: "\f0f8"; } + +.fa-hospital-wide::before { + content: "\f0f8"; } + +.fa-wine-bottle::before { + content: "\f72f"; } + +.fa-chess-rook::before { + content: "\f447"; } + +.fa-bars-staggered::before { + content: "\f550"; } + +.fa-reorder::before { + content: "\f550"; } + +.fa-stream::before { + content: "\f550"; } + +.fa-dharmachakra::before { + content: "\f655"; } + +.fa-hotdog::before { + content: "\f80f"; } + +.fa-person-walking-with-cane::before { + content: "\f29d"; } + +.fa-blind::before { + content: "\f29d"; } + +.fa-drum::before { + content: "\f569"; } + +.fa-ice-cream::before { + content: "\f810"; } + +.fa-heart-circle-bolt::before { + content: "\e4fc"; } + +.fa-fax::before { + content: "\f1ac"; } + +.fa-paragraph::before { + content: "\f1dd"; } + +.fa-check-to-slot::before { + content: "\f772"; } + +.fa-vote-yea::before { + content: "\f772"; } + +.fa-star-half::before { + content: "\f089"; } + +.fa-boxes-stacked::before { + content: "\f468"; } + +.fa-boxes::before { + content: "\f468"; } + +.fa-boxes-alt::before { + content: "\f468"; } + +.fa-link::before { + content: 
"\f0c1"; } + +.fa-chain::before { + content: "\f0c1"; } + +.fa-ear-listen::before { + content: "\f2a2"; } + +.fa-assistive-listening-systems::before { + content: "\f2a2"; } + +.fa-tree-city::before { + content: "\e587"; } + +.fa-play::before { + content: "\f04b"; } + +.fa-font::before { + content: "\f031"; } + +.fa-rupiah-sign::before { + content: "\e23d"; } + +.fa-magnifying-glass::before { + content: "\f002"; } + +.fa-search::before { + content: "\f002"; } + +.fa-table-tennis-paddle-ball::before { + content: "\f45d"; } + +.fa-ping-pong-paddle-ball::before { + content: "\f45d"; } + +.fa-table-tennis::before { + content: "\f45d"; } + +.fa-person-dots-from-line::before { + content: "\f470"; } + +.fa-diagnoses::before { + content: "\f470"; } + +.fa-trash-can-arrow-up::before { + content: "\f82a"; } + +.fa-trash-restore-alt::before { + content: "\f82a"; } + +.fa-naira-sign::before { + content: "\e1f6"; } + +.fa-cart-arrow-down::before { + content: "\f218"; } + +.fa-walkie-talkie::before { + content: "\f8ef"; } + +.fa-file-pen::before { + content: "\f31c"; } + +.fa-file-edit::before { + content: "\f31c"; } + +.fa-receipt::before { + content: "\f543"; } + +.fa-square-pen::before { + content: "\f14b"; } + +.fa-pen-square::before { + content: "\f14b"; } + +.fa-pencil-square::before { + content: "\f14b"; } + +.fa-suitcase-rolling::before { + content: "\f5c1"; } + +.fa-person-circle-exclamation::before { + content: "\e53f"; } + +.fa-chevron-down::before { + content: "\f078"; } + +.fa-battery-full::before { + content: "\f240"; } + +.fa-battery::before { + content: "\f240"; } + +.fa-battery-5::before { + content: "\f240"; } + +.fa-skull-crossbones::before { + content: "\f714"; } + +.fa-code-compare::before { + content: "\e13a"; } + +.fa-list-ul::before { + content: "\f0ca"; } + +.fa-list-dots::before { + content: "\f0ca"; } + +.fa-school-lock::before { + content: "\e56f"; } + +.fa-tower-cell::before { + content: "\e585"; } + +.fa-down-long::before { + content: "\f309"; } + 
+.fa-long-arrow-alt-down::before { + content: "\f309"; } + +.fa-ranking-star::before { + content: "\e561"; } + +.fa-chess-king::before { + content: "\f43f"; } + +.fa-person-harassing::before { + content: "\e549"; } + +.fa-brazilian-real-sign::before { + content: "\e46c"; } + +.fa-landmark-dome::before { + content: "\f752"; } + +.fa-landmark-alt::before { + content: "\f752"; } + +.fa-arrow-up::before { + content: "\f062"; } + +.fa-tv::before { + content: "\f26c"; } + +.fa-television::before { + content: "\f26c"; } + +.fa-tv-alt::before { + content: "\f26c"; } + +.fa-shrimp::before { + content: "\e448"; } + +.fa-list-check::before { + content: "\f0ae"; } + +.fa-tasks::before { + content: "\f0ae"; } + +.fa-jug-detergent::before { + content: "\e519"; } + +.fa-circle-user::before { + content: "\f2bd"; } + +.fa-user-circle::before { + content: "\f2bd"; } + +.fa-user-shield::before { + content: "\f505"; } + +.fa-wind::before { + content: "\f72e"; } + +.fa-car-burst::before { + content: "\f5e1"; } + +.fa-car-crash::before { + content: "\f5e1"; } + +.fa-y::before { + content: "\59"; } + +.fa-person-snowboarding::before { + content: "\f7ce"; } + +.fa-snowboarding::before { + content: "\f7ce"; } + +.fa-truck-fast::before { + content: "\f48b"; } + +.fa-shipping-fast::before { + content: "\f48b"; } + +.fa-fish::before { + content: "\f578"; } + +.fa-user-graduate::before { + content: "\f501"; } + +.fa-circle-half-stroke::before { + content: "\f042"; } + +.fa-adjust::before { + content: "\f042"; } + +.fa-clapperboard::before { + content: "\e131"; } + +.fa-circle-radiation::before { + content: "\f7ba"; } + +.fa-radiation-alt::before { + content: "\f7ba"; } + +.fa-baseball::before { + content: "\f433"; } + +.fa-baseball-ball::before { + content: "\f433"; } + +.fa-jet-fighter-up::before { + content: "\e518"; } + +.fa-diagram-project::before { + content: "\f542"; } + +.fa-project-diagram::before { + content: "\f542"; } + +.fa-copy::before { + content: "\f0c5"; } + 
+.fa-volume-xmark::before { + content: "\f6a9"; } + +.fa-volume-mute::before { + content: "\f6a9"; } + +.fa-volume-times::before { + content: "\f6a9"; } + +.fa-hand-sparkles::before { + content: "\e05d"; } + +.fa-grip::before { + content: "\f58d"; } + +.fa-grip-horizontal::before { + content: "\f58d"; } + +.fa-share-from-square::before { + content: "\f14d"; } + +.fa-share-square::before { + content: "\f14d"; } + +.fa-child-combatant::before { + content: "\e4e0"; } + +.fa-child-rifle::before { + content: "\e4e0"; } + +.fa-gun::before { + content: "\e19b"; } + +.fa-square-phone::before { + content: "\f098"; } + +.fa-phone-square::before { + content: "\f098"; } + +.fa-plus::before { + content: "\2b"; } + +.fa-add::before { + content: "\2b"; } + +.fa-expand::before { + content: "\f065"; } + +.fa-computer::before { + content: "\e4e5"; } + +.fa-xmark::before { + content: "\f00d"; } + +.fa-close::before { + content: "\f00d"; } + +.fa-multiply::before { + content: "\f00d"; } + +.fa-remove::before { + content: "\f00d"; } + +.fa-times::before { + content: "\f00d"; } + +.fa-arrows-up-down-left-right::before { + content: "\f047"; } + +.fa-arrows::before { + content: "\f047"; } + +.fa-chalkboard-user::before { + content: "\f51c"; } + +.fa-chalkboard-teacher::before { + content: "\f51c"; } + +.fa-peso-sign::before { + content: "\e222"; } + +.fa-building-shield::before { + content: "\e4d8"; } + +.fa-baby::before { + content: "\f77c"; } + +.fa-users-line::before { + content: "\e592"; } + +.fa-quote-left::before { + content: "\f10d"; } + +.fa-quote-left-alt::before { + content: "\f10d"; } + +.fa-tractor::before { + content: "\f722"; } + +.fa-trash-arrow-up::before { + content: "\f829"; } + +.fa-trash-restore::before { + content: "\f829"; } + +.fa-arrow-down-up-lock::before { + content: "\e4b0"; } + +.fa-lines-leaning::before { + content: "\e51e"; } + +.fa-ruler-combined::before { + content: "\f546"; } + +.fa-copyright::before { + content: "\f1f9"; } + +.fa-equals::before { + 
content: "\3d"; } + +.fa-blender::before { + content: "\f517"; } + +.fa-teeth::before { + content: "\f62e"; } + +.fa-shekel-sign::before { + content: "\f20b"; } + +.fa-ils::before { + content: "\f20b"; } + +.fa-shekel::before { + content: "\f20b"; } + +.fa-sheqel::before { + content: "\f20b"; } + +.fa-sheqel-sign::before { + content: "\f20b"; } + +.fa-map::before { + content: "\f279"; } + +.fa-rocket::before { + content: "\f135"; } + +.fa-photo-film::before { + content: "\f87c"; } + +.fa-photo-video::before { + content: "\f87c"; } + +.fa-folder-minus::before { + content: "\f65d"; } + +.fa-store::before { + content: "\f54e"; } + +.fa-arrow-trend-up::before { + content: "\e098"; } + +.fa-plug-circle-minus::before { + content: "\e55e"; } + +.fa-sign-hanging::before { + content: "\f4d9"; } + +.fa-sign::before { + content: "\f4d9"; } + +.fa-bezier-curve::before { + content: "\f55b"; } + +.fa-bell-slash::before { + content: "\f1f6"; } + +.fa-tablet::before { + content: "\f3fb"; } + +.fa-tablet-android::before { + content: "\f3fb"; } + +.fa-school-flag::before { + content: "\e56e"; } + +.fa-fill::before { + content: "\f575"; } + +.fa-angle-up::before { + content: "\f106"; } + +.fa-drumstick-bite::before { + content: "\f6d7"; } + +.fa-holly-berry::before { + content: "\f7aa"; } + +.fa-chevron-left::before { + content: "\f053"; } + +.fa-bacteria::before { + content: "\e059"; } + +.fa-hand-lizard::before { + content: "\f258"; } + +.fa-notdef::before { + content: "\e1fe"; } + +.fa-disease::before { + content: "\f7fa"; } + +.fa-briefcase-medical::before { + content: "\f469"; } + +.fa-genderless::before { + content: "\f22d"; } + +.fa-chevron-right::before { + content: "\f054"; } + +.fa-retweet::before { + content: "\f079"; } + +.fa-car-rear::before { + content: "\f5de"; } + +.fa-car-alt::before { + content: "\f5de"; } + +.fa-pump-soap::before { + content: "\e06b"; } + +.fa-video-slash::before { + content: "\f4e2"; } + +.fa-battery-quarter::before { + content: "\f243"; } + 
+.fa-battery-2::before { + content: "\f243"; } + +.fa-radio::before { + content: "\f8d7"; } + +.fa-baby-carriage::before { + content: "\f77d"; } + +.fa-carriage-baby::before { + content: "\f77d"; } + +.fa-traffic-light::before { + content: "\f637"; } + +.fa-thermometer::before { + content: "\f491"; } + +.fa-vr-cardboard::before { + content: "\f729"; } + +.fa-hand-middle-finger::before { + content: "\f806"; } + +.fa-percent::before { + content: "\25"; } + +.fa-percentage::before { + content: "\25"; } + +.fa-truck-moving::before { + content: "\f4df"; } + +.fa-glass-water-droplet::before { + content: "\e4f5"; } + +.fa-display::before { + content: "\e163"; } + +.fa-face-smile::before { + content: "\f118"; } + +.fa-smile::before { + content: "\f118"; } + +.fa-thumbtack::before { + content: "\f08d"; } + +.fa-thumb-tack::before { + content: "\f08d"; } + +.fa-trophy::before { + content: "\f091"; } + +.fa-person-praying::before { + content: "\f683"; } + +.fa-pray::before { + content: "\f683"; } + +.fa-hammer::before { + content: "\f6e3"; } + +.fa-hand-peace::before { + content: "\f25b"; } + +.fa-rotate::before { + content: "\f2f1"; } + +.fa-sync-alt::before { + content: "\f2f1"; } + +.fa-spinner::before { + content: "\f110"; } + +.fa-robot::before { + content: "\f544"; } + +.fa-peace::before { + content: "\f67c"; } + +.fa-gears::before { + content: "\f085"; } + +.fa-cogs::before { + content: "\f085"; } + +.fa-warehouse::before { + content: "\f494"; } + +.fa-arrow-up-right-dots::before { + content: "\e4b7"; } + +.fa-splotch::before { + content: "\f5bc"; } + +.fa-face-grin-hearts::before { + content: "\f584"; } + +.fa-grin-hearts::before { + content: "\f584"; } + +.fa-dice-four::before { + content: "\f524"; } + +.fa-sim-card::before { + content: "\f7c4"; } + +.fa-transgender::before { + content: "\f225"; } + +.fa-transgender-alt::before { + content: "\f225"; } + +.fa-mercury::before { + content: "\f223"; } + +.fa-arrow-turn-down::before { + content: "\f149"; } + 
+.fa-level-down::before { + content: "\f149"; } + +.fa-person-falling-burst::before { + content: "\e547"; } + +.fa-award::before { + content: "\f559"; } + +.fa-ticket-simple::before { + content: "\f3ff"; } + +.fa-ticket-alt::before { + content: "\f3ff"; } + +.fa-building::before { + content: "\f1ad"; } + +.fa-angles-left::before { + content: "\f100"; } + +.fa-angle-double-left::before { + content: "\f100"; } + +.fa-qrcode::before { + content: "\f029"; } + +.fa-clock-rotate-left::before { + content: "\f1da"; } + +.fa-history::before { + content: "\f1da"; } + +.fa-face-grin-beam-sweat::before { + content: "\f583"; } + +.fa-grin-beam-sweat::before { + content: "\f583"; } + +.fa-file-export::before { + content: "\f56e"; } + +.fa-arrow-right-from-file::before { + content: "\f56e"; } + +.fa-shield::before { + content: "\f132"; } + +.fa-shield-blank::before { + content: "\f132"; } + +.fa-arrow-up-short-wide::before { + content: "\f885"; } + +.fa-sort-amount-up-alt::before { + content: "\f885"; } + +.fa-house-medical::before { + content: "\e3b2"; } + +.fa-golf-ball-tee::before { + content: "\f450"; } + +.fa-golf-ball::before { + content: "\f450"; } + +.fa-circle-chevron-left::before { + content: "\f137"; } + +.fa-chevron-circle-left::before { + content: "\f137"; } + +.fa-house-chimney-window::before { + content: "\e00d"; } + +.fa-pen-nib::before { + content: "\f5ad"; } + +.fa-tent-arrow-turn-left::before { + content: "\e580"; } + +.fa-tents::before { + content: "\e582"; } + +.fa-wand-magic::before { + content: "\f0d0"; } + +.fa-magic::before { + content: "\f0d0"; } + +.fa-dog::before { + content: "\f6d3"; } + +.fa-carrot::before { + content: "\f787"; } + +.fa-moon::before { + content: "\f186"; } + +.fa-wine-glass-empty::before { + content: "\f5ce"; } + +.fa-wine-glass-alt::before { + content: "\f5ce"; } + +.fa-cheese::before { + content: "\f7ef"; } + +.fa-yin-yang::before { + content: "\f6ad"; } + +.fa-music::before { + content: "\f001"; } + +.fa-code-commit::before { + 
content: "\f386"; } + +.fa-temperature-low::before { + content: "\f76b"; } + +.fa-person-biking::before { + content: "\f84a"; } + +.fa-biking::before { + content: "\f84a"; } + +.fa-broom::before { + content: "\f51a"; } + +.fa-shield-heart::before { + content: "\e574"; } + +.fa-gopuram::before { + content: "\f664"; } + +.fa-earth-oceania::before { + content: "\e47b"; } + +.fa-globe-oceania::before { + content: "\e47b"; } + +.fa-square-xmark::before { + content: "\f2d3"; } + +.fa-times-square::before { + content: "\f2d3"; } + +.fa-xmark-square::before { + content: "\f2d3"; } + +.fa-hashtag::before { + content: "\23"; } + +.fa-up-right-and-down-left-from-center::before { + content: "\f424"; } + +.fa-expand-alt::before { + content: "\f424"; } + +.fa-oil-can::before { + content: "\f613"; } + +.fa-t::before { + content: "\54"; } + +.fa-hippo::before { + content: "\f6ed"; } + +.fa-chart-column::before { + content: "\e0e3"; } + +.fa-infinity::before { + content: "\f534"; } + +.fa-vial-circle-check::before { + content: "\e596"; } + +.fa-person-arrow-down-to-line::before { + content: "\e538"; } + +.fa-voicemail::before { + content: "\f897"; } + +.fa-fan::before { + content: "\f863"; } + +.fa-person-walking-luggage::before { + content: "\e554"; } + +.fa-up-down::before { + content: "\f338"; } + +.fa-arrows-alt-v::before { + content: "\f338"; } + +.fa-cloud-moon-rain::before { + content: "\f73c"; } + +.fa-calendar::before { + content: "\f133"; } + +.fa-trailer::before { + content: "\e041"; } + +.fa-bahai::before { + content: "\f666"; } + +.fa-haykal::before { + content: "\f666"; } + +.fa-sd-card::before { + content: "\f7c2"; } + +.fa-dragon::before { + content: "\f6d5"; } + +.fa-shoe-prints::before { + content: "\f54b"; } + +.fa-circle-plus::before { + content: "\f055"; } + +.fa-plus-circle::before { + content: "\f055"; } + +.fa-face-grin-tongue-wink::before { + content: "\f58b"; } + +.fa-grin-tongue-wink::before { + content: "\f58b"; } + +.fa-hand-holding::before { + content: 
"\f4bd"; } + +.fa-plug-circle-exclamation::before { + content: "\e55d"; } + +.fa-link-slash::before { + content: "\f127"; } + +.fa-chain-broken::before { + content: "\f127"; } + +.fa-chain-slash::before { + content: "\f127"; } + +.fa-unlink::before { + content: "\f127"; } + +.fa-clone::before { + content: "\f24d"; } + +.fa-person-walking-arrow-loop-left::before { + content: "\e551"; } + +.fa-arrow-up-z-a::before { + content: "\f882"; } + +.fa-sort-alpha-up-alt::before { + content: "\f882"; } + +.fa-fire-flame-curved::before { + content: "\f7e4"; } + +.fa-fire-alt::before { + content: "\f7e4"; } + +.fa-tornado::before { + content: "\f76f"; } + +.fa-file-circle-plus::before { + content: "\e494"; } + +.fa-book-quran::before { + content: "\f687"; } + +.fa-quran::before { + content: "\f687"; } + +.fa-anchor::before { + content: "\f13d"; } + +.fa-border-all::before { + content: "\f84c"; } + +.fa-face-angry::before { + content: "\f556"; } + +.fa-angry::before { + content: "\f556"; } + +.fa-cookie-bite::before { + content: "\f564"; } + +.fa-arrow-trend-down::before { + content: "\e097"; } + +.fa-rss::before { + content: "\f09e"; } + +.fa-feed::before { + content: "\f09e"; } + +.fa-draw-polygon::before { + content: "\f5ee"; } + +.fa-scale-balanced::before { + content: "\f24e"; } + +.fa-balance-scale::before { + content: "\f24e"; } + +.fa-gauge-simple-high::before { + content: "\f62a"; } + +.fa-tachometer::before { + content: "\f62a"; } + +.fa-tachometer-fast::before { + content: "\f62a"; } + +.fa-shower::before { + content: "\f2cc"; } + +.fa-desktop::before { + content: "\f390"; } + +.fa-desktop-alt::before { + content: "\f390"; } + +.fa-m::before { + content: "\4d"; } + +.fa-table-list::before { + content: "\f00b"; } + +.fa-th-list::before { + content: "\f00b"; } + +.fa-comment-sms::before { + content: "\f7cd"; } + +.fa-sms::before { + content: "\f7cd"; } + +.fa-book::before { + content: "\f02d"; } + +.fa-user-plus::before { + content: "\f234"; } + +.fa-check::before { + 
content: "\f00c"; } + +.fa-battery-three-quarters::before { + content: "\f241"; } + +.fa-battery-4::before { + content: "\f241"; } + +.fa-house-circle-check::before { + content: "\e509"; } + +.fa-angle-left::before { + content: "\f104"; } + +.fa-diagram-successor::before { + content: "\e47a"; } + +.fa-truck-arrow-right::before { + content: "\e58b"; } + +.fa-arrows-split-up-and-left::before { + content: "\e4bc"; } + +.fa-hand-fist::before { + content: "\f6de"; } + +.fa-fist-raised::before { + content: "\f6de"; } + +.fa-cloud-moon::before { + content: "\f6c3"; } + +.fa-briefcase::before { + content: "\f0b1"; } + +.fa-person-falling::before { + content: "\e546"; } + +.fa-image-portrait::before { + content: "\f3e0"; } + +.fa-portrait::before { + content: "\f3e0"; } + +.fa-user-tag::before { + content: "\f507"; } + +.fa-rug::before { + content: "\e569"; } + +.fa-earth-europe::before { + content: "\f7a2"; } + +.fa-globe-europe::before { + content: "\f7a2"; } + +.fa-cart-flatbed-suitcase::before { + content: "\f59d"; } + +.fa-luggage-cart::before { + content: "\f59d"; } + +.fa-rectangle-xmark::before { + content: "\f410"; } + +.fa-rectangle-times::before { + content: "\f410"; } + +.fa-times-rectangle::before { + content: "\f410"; } + +.fa-window-close::before { + content: "\f410"; } + +.fa-baht-sign::before { + content: "\e0ac"; } + +.fa-book-open::before { + content: "\f518"; } + +.fa-book-journal-whills::before { + content: "\f66a"; } + +.fa-journal-whills::before { + content: "\f66a"; } + +.fa-handcuffs::before { + content: "\e4f8"; } + +.fa-triangle-exclamation::before { + content: "\f071"; } + +.fa-exclamation-triangle::before { + content: "\f071"; } + +.fa-warning::before { + content: "\f071"; } + +.fa-database::before { + content: "\f1c0"; } + +.fa-share::before { + content: "\f064"; } + +.fa-arrow-turn-right::before { + content: "\f064"; } + +.fa-mail-forward::before { + content: "\f064"; } + +.fa-bottle-droplet::before { + content: "\e4c4"; } + 
+.fa-mask-face::before { + content: "\e1d7"; } + +.fa-hill-rockslide::before { + content: "\e508"; } + +.fa-right-left::before { + content: "\f362"; } + +.fa-exchange-alt::before { + content: "\f362"; } + +.fa-paper-plane::before { + content: "\f1d8"; } + +.fa-road-circle-exclamation::before { + content: "\e565"; } + +.fa-dungeon::before { + content: "\f6d9"; } + +.fa-align-right::before { + content: "\f038"; } + +.fa-money-bill-1-wave::before { + content: "\f53b"; } + +.fa-money-bill-wave-alt::before { + content: "\f53b"; } + +.fa-life-ring::before { + content: "\f1cd"; } + +.fa-hands::before { + content: "\f2a7"; } + +.fa-sign-language::before { + content: "\f2a7"; } + +.fa-signing::before { + content: "\f2a7"; } + +.fa-calendar-day::before { + content: "\f783"; } + +.fa-water-ladder::before { + content: "\f5c5"; } + +.fa-ladder-water::before { + content: "\f5c5"; } + +.fa-swimming-pool::before { + content: "\f5c5"; } + +.fa-arrows-up-down::before { + content: "\f07d"; } + +.fa-arrows-v::before { + content: "\f07d"; } + +.fa-face-grimace::before { + content: "\f57f"; } + +.fa-grimace::before { + content: "\f57f"; } + +.fa-wheelchair-move::before { + content: "\e2ce"; } + +.fa-wheelchair-alt::before { + content: "\e2ce"; } + +.fa-turn-down::before { + content: "\f3be"; } + +.fa-level-down-alt::before { + content: "\f3be"; } + +.fa-person-walking-arrow-right::before { + content: "\e552"; } + +.fa-square-envelope::before { + content: "\f199"; } + +.fa-envelope-square::before { + content: "\f199"; } + +.fa-dice::before { + content: "\f522"; } + +.fa-bowling-ball::before { + content: "\f436"; } + +.fa-brain::before { + content: "\f5dc"; } + +.fa-bandage::before { + content: "\f462"; } + +.fa-band-aid::before { + content: "\f462"; } + +.fa-calendar-minus::before { + content: "\f272"; } + +.fa-circle-xmark::before { + content: "\f057"; } + +.fa-times-circle::before { + content: "\f057"; } + +.fa-xmark-circle::before { + content: "\f057"; } + +.fa-gifts::before { + 
content: "\f79c"; } + +.fa-hotel::before { + content: "\f594"; } + +.fa-earth-asia::before { + content: "\f57e"; } + +.fa-globe-asia::before { + content: "\f57e"; } + +.fa-id-card-clip::before { + content: "\f47f"; } + +.fa-id-card-alt::before { + content: "\f47f"; } + +.fa-magnifying-glass-plus::before { + content: "\f00e"; } + +.fa-search-plus::before { + content: "\f00e"; } + +.fa-thumbs-up::before { + content: "\f164"; } + +.fa-user-clock::before { + content: "\f4fd"; } + +.fa-hand-dots::before { + content: "\f461"; } + +.fa-allergies::before { + content: "\f461"; } + +.fa-file-invoice::before { + content: "\f570"; } + +.fa-window-minimize::before { + content: "\f2d1"; } + +.fa-mug-saucer::before { + content: "\f0f4"; } + +.fa-coffee::before { + content: "\f0f4"; } + +.fa-brush::before { + content: "\f55d"; } + +.fa-mask::before { + content: "\f6fa"; } + +.fa-magnifying-glass-minus::before { + content: "\f010"; } + +.fa-search-minus::before { + content: "\f010"; } + +.fa-ruler-vertical::before { + content: "\f548"; } + +.fa-user-large::before { + content: "\f406"; } + +.fa-user-alt::before { + content: "\f406"; } + +.fa-train-tram::before { + content: "\e5b4"; } + +.fa-user-nurse::before { + content: "\f82f"; } + +.fa-syringe::before { + content: "\f48e"; } + +.fa-cloud-sun::before { + content: "\f6c4"; } + +.fa-stopwatch-20::before { + content: "\e06f"; } + +.fa-square-full::before { + content: "\f45c"; } + +.fa-magnet::before { + content: "\f076"; } + +.fa-jar::before { + content: "\e516"; } + +.fa-note-sticky::before { + content: "\f249"; } + +.fa-sticky-note::before { + content: "\f249"; } + +.fa-bug-slash::before { + content: "\e490"; } + +.fa-arrow-up-from-water-pump::before { + content: "\e4b6"; } + +.fa-bone::before { + content: "\f5d7"; } + +.fa-user-injured::before { + content: "\f728"; } + +.fa-face-sad-tear::before { + content: "\f5b4"; } + +.fa-sad-tear::before { + content: "\f5b4"; } + +.fa-plane::before { + content: "\f072"; } + 
+.fa-tent-arrows-down::before { + content: "\e581"; } + +.fa-exclamation::before { + content: "\21"; } + +.fa-arrows-spin::before { + content: "\e4bb"; } + +.fa-print::before { + content: "\f02f"; } + +.fa-turkish-lira-sign::before { + content: "\e2bb"; } + +.fa-try::before { + content: "\e2bb"; } + +.fa-turkish-lira::before { + content: "\e2bb"; } + +.fa-dollar-sign::before { + content: "\24"; } + +.fa-dollar::before { + content: "\24"; } + +.fa-usd::before { + content: "\24"; } + +.fa-x::before { + content: "\58"; } + +.fa-magnifying-glass-dollar::before { + content: "\f688"; } + +.fa-search-dollar::before { + content: "\f688"; } + +.fa-users-gear::before { + content: "\f509"; } + +.fa-users-cog::before { + content: "\f509"; } + +.fa-person-military-pointing::before { + content: "\e54a"; } + +.fa-building-columns::before { + content: "\f19c"; } + +.fa-bank::before { + content: "\f19c"; } + +.fa-institution::before { + content: "\f19c"; } + +.fa-museum::before { + content: "\f19c"; } + +.fa-university::before { + content: "\f19c"; } + +.fa-umbrella::before { + content: "\f0e9"; } + +.fa-trowel::before { + content: "\e589"; } + +.fa-d::before { + content: "\44"; } + +.fa-stapler::before { + content: "\e5af"; } + +.fa-masks-theater::before { + content: "\f630"; } + +.fa-theater-masks::before { + content: "\f630"; } + +.fa-kip-sign::before { + content: "\e1c4"; } + +.fa-hand-point-left::before { + content: "\f0a5"; } + +.fa-handshake-simple::before { + content: "\f4c6"; } + +.fa-handshake-alt::before { + content: "\f4c6"; } + +.fa-jet-fighter::before { + content: "\f0fb"; } + +.fa-fighter-jet::before { + content: "\f0fb"; } + +.fa-square-share-nodes::before { + content: "\f1e1"; } + +.fa-share-alt-square::before { + content: "\f1e1"; } + +.fa-barcode::before { + content: "\f02a"; } + +.fa-plus-minus::before { + content: "\e43c"; } + +.fa-video::before { + content: "\f03d"; } + +.fa-video-camera::before { + content: "\f03d"; } + +.fa-graduation-cap::before { + 
content: "\f19d"; } + +.fa-mortar-board::before { + content: "\f19d"; } + +.fa-hand-holding-medical::before { + content: "\e05c"; } + +.fa-person-circle-check::before { + content: "\e53e"; } + +.fa-turn-up::before { + content: "\f3bf"; } + +.fa-level-up-alt::before { + content: "\f3bf"; } + +.sr-only, +.fa-sr-only { + position: absolute; + width: 1px; + height: 1px; + padding: 0; + margin: -1px; + overflow: hidden; + clip: rect(0, 0, 0, 0); + white-space: nowrap; + border-width: 0; } + +.sr-only-focusable:not(:focus), +.fa-sr-only-focusable:not(:focus) { + position: absolute; + width: 1px; + height: 1px; + padding: 0; + margin: -1px; + overflow: hidden; + clip: rect(0, 0, 0, 0); + white-space: nowrap; + border-width: 0; } +:root, :host { + --fa-style-family-brands: 'Font Awesome 6 Brands'; + --fa-font-brands: normal 400 1em/1 'Font Awesome 6 Brands'; } + +@font-face { + font-family: 'Font Awesome 6 Brands'; + font-style: normal; + font-weight: 400; + font-display: block; + src: url("../webfonts/FontAwesome6Brands-Regular-400.woff2") format("woff2"), url("../webfonts/FontAwesome6Brands-Regular-400.ttf") format("truetype"); } + +.fab, +.fa-brands { + font-weight: 400; } + +.fa-monero:before { + content: "\f3d0"; } + +.fa-hooli:before { + content: "\f427"; } + +.fa-yelp:before { + content: "\f1e9"; } + +.fa-cc-visa:before { + content: "\f1f0"; } + +.fa-lastfm:before { + content: "\f202"; } + +.fa-shopware:before { + content: "\f5b5"; } + +.fa-creative-commons-nc:before { + content: "\f4e8"; } + +.fa-aws:before { + content: "\f375"; } + +.fa-redhat:before { + content: "\f7bc"; } + +.fa-yoast:before { + content: "\f2b1"; } + +.fa-cloudflare:before { + content: "\e07d"; } + +.fa-ups:before { + content: "\f7e0"; } + +.fa-wpexplorer:before { + content: "\f2de"; } + +.fa-dyalog:before { + content: "\f399"; } + +.fa-bity:before { + content: "\f37a"; } + +.fa-stackpath:before { + content: "\f842"; } + +.fa-buysellads:before { + content: "\f20d"; } + +.fa-first-order:before { 
+ content: "\f2b0"; } + +.fa-modx:before { + content: "\f285"; } + +.fa-guilded:before { + content: "\e07e"; } + +.fa-vnv:before { + content: "\f40b"; } + +.fa-square-js:before { + content: "\f3b9"; } + +.fa-js-square:before { + content: "\f3b9"; } + +.fa-microsoft:before { + content: "\f3ca"; } + +.fa-qq:before { + content: "\f1d6"; } + +.fa-orcid:before { + content: "\f8d2"; } + +.fa-java:before { + content: "\f4e4"; } + +.fa-invision:before { + content: "\f7b0"; } + +.fa-creative-commons-pd-alt:before { + content: "\f4ed"; } + +.fa-centercode:before { + content: "\f380"; } + +.fa-glide-g:before { + content: "\f2a6"; } + +.fa-drupal:before { + content: "\f1a9"; } + +.fa-hire-a-helper:before { + content: "\f3b0"; } + +.fa-creative-commons-by:before { + content: "\f4e7"; } + +.fa-unity:before { + content: "\e049"; } + +.fa-whmcs:before { + content: "\f40d"; } + +.fa-rocketchat:before { + content: "\f3e8"; } + +.fa-vk:before { + content: "\f189"; } + +.fa-untappd:before { + content: "\f405"; } + +.fa-mailchimp:before { + content: "\f59e"; } + +.fa-css3-alt:before { + content: "\f38b"; } + +.fa-square-reddit:before { + content: "\f1a2"; } + +.fa-reddit-square:before { + content: "\f1a2"; } + +.fa-vimeo-v:before { + content: "\f27d"; } + +.fa-contao:before { + content: "\f26d"; } + +.fa-square-font-awesome:before { + content: "\e5ad"; } + +.fa-deskpro:before { + content: "\f38f"; } + +.fa-sistrix:before { + content: "\f3ee"; } + +.fa-square-instagram:before { + content: "\e055"; } + +.fa-instagram-square:before { + content: "\e055"; } + +.fa-battle-net:before { + content: "\f835"; } + +.fa-the-red-yeti:before { + content: "\f69d"; } + +.fa-square-hacker-news:before { + content: "\f3af"; } + +.fa-hacker-news-square:before { + content: "\f3af"; } + +.fa-edge:before { + content: "\f282"; } + +.fa-threads:before { + content: "\e618"; } + +.fa-napster:before { + content: "\f3d2"; } + +.fa-square-snapchat:before { + content: "\f2ad"; } + +.fa-snapchat-square:before { + 
content: "\f2ad"; } + +.fa-google-plus-g:before { + content: "\f0d5"; } + +.fa-artstation:before { + content: "\f77a"; } + +.fa-markdown:before { + content: "\f60f"; } + +.fa-sourcetree:before { + content: "\f7d3"; } + +.fa-google-plus:before { + content: "\f2b3"; } + +.fa-diaspora:before { + content: "\f791"; } + +.fa-foursquare:before { + content: "\f180"; } + +.fa-stack-overflow:before { + content: "\f16c"; } + +.fa-github-alt:before { + content: "\f113"; } + +.fa-phoenix-squadron:before { + content: "\f511"; } + +.fa-pagelines:before { + content: "\f18c"; } + +.fa-algolia:before { + content: "\f36c"; } + +.fa-red-river:before { + content: "\f3e3"; } + +.fa-creative-commons-sa:before { + content: "\f4ef"; } + +.fa-safari:before { + content: "\f267"; } + +.fa-google:before { + content: "\f1a0"; } + +.fa-square-font-awesome-stroke:before { + content: "\f35c"; } + +.fa-font-awesome-alt:before { + content: "\f35c"; } + +.fa-atlassian:before { + content: "\f77b"; } + +.fa-linkedin-in:before { + content: "\f0e1"; } + +.fa-digital-ocean:before { + content: "\f391"; } + +.fa-nimblr:before { + content: "\f5a8"; } + +.fa-chromecast:before { + content: "\f838"; } + +.fa-evernote:before { + content: "\f839"; } + +.fa-hacker-news:before { + content: "\f1d4"; } + +.fa-creative-commons-sampling:before { + content: "\f4f0"; } + +.fa-adversal:before { + content: "\f36a"; } + +.fa-creative-commons:before { + content: "\f25e"; } + +.fa-watchman-monitoring:before { + content: "\e087"; } + +.fa-fonticons:before { + content: "\f280"; } + +.fa-weixin:before { + content: "\f1d7"; } + +.fa-shirtsinbulk:before { + content: "\f214"; } + +.fa-codepen:before { + content: "\f1cb"; } + +.fa-git-alt:before { + content: "\f841"; } + +.fa-lyft:before { + content: "\f3c3"; } + +.fa-rev:before { + content: "\f5b2"; } + +.fa-windows:before { + content: "\f17a"; } + +.fa-wizards-of-the-coast:before { + content: "\f730"; } + +.fa-square-viadeo:before { + content: "\f2aa"; } + 
+.fa-viadeo-square:before { + content: "\f2aa"; } + +.fa-meetup:before { + content: "\f2e0"; } + +.fa-centos:before { + content: "\f789"; } + +.fa-adn:before { + content: "\f170"; } + +.fa-cloudsmith:before { + content: "\f384"; } + +.fa-pied-piper-alt:before { + content: "\f1a8"; } + +.fa-square-dribbble:before { + content: "\f397"; } + +.fa-dribbble-square:before { + content: "\f397"; } + +.fa-codiepie:before { + content: "\f284"; } + +.fa-node:before { + content: "\f419"; } + +.fa-mix:before { + content: "\f3cb"; } + +.fa-steam:before { + content: "\f1b6"; } + +.fa-cc-apple-pay:before { + content: "\f416"; } + +.fa-scribd:before { + content: "\f28a"; } + +.fa-debian:before { + content: "\e60b"; } + +.fa-openid:before { + content: "\f19b"; } + +.fa-instalod:before { + content: "\e081"; } + +.fa-expeditedssl:before { + content: "\f23e"; } + +.fa-sellcast:before { + content: "\f2da"; } + +.fa-square-twitter:before { + content: "\f081"; } + +.fa-twitter-square:before { + content: "\f081"; } + +.fa-r-project:before { + content: "\f4f7"; } + +.fa-delicious:before { + content: "\f1a5"; } + +.fa-freebsd:before { + content: "\f3a4"; } + +.fa-vuejs:before { + content: "\f41f"; } + +.fa-accusoft:before { + content: "\f369"; } + +.fa-ioxhost:before { + content: "\f208"; } + +.fa-fonticons-fi:before { + content: "\f3a2"; } + +.fa-app-store:before { + content: "\f36f"; } + +.fa-cc-mastercard:before { + content: "\f1f1"; } + +.fa-itunes-note:before { + content: "\f3b5"; } + +.fa-golang:before { + content: "\e40f"; } + +.fa-kickstarter:before { + content: "\f3bb"; } + +.fa-grav:before { + content: "\f2d6"; } + +.fa-weibo:before { + content: "\f18a"; } + +.fa-uncharted:before { + content: "\e084"; } + +.fa-firstdraft:before { + content: "\f3a1"; } + +.fa-square-youtube:before { + content: "\f431"; } + +.fa-youtube-square:before { + content: "\f431"; } + +.fa-wikipedia-w:before { + content: "\f266"; } + +.fa-wpressr:before { + content: "\f3e4"; } + +.fa-rendact:before { + 
content: "\f3e4"; } + +.fa-angellist:before { + content: "\f209"; } + +.fa-galactic-republic:before { + content: "\f50c"; } + +.fa-nfc-directional:before { + content: "\e530"; } + +.fa-skype:before { + content: "\f17e"; } + +.fa-joget:before { + content: "\f3b7"; } + +.fa-fedora:before { + content: "\f798"; } + +.fa-stripe-s:before { + content: "\f42a"; } + +.fa-meta:before { + content: "\e49b"; } + +.fa-laravel:before { + content: "\f3bd"; } + +.fa-hotjar:before { + content: "\f3b1"; } + +.fa-bluetooth-b:before { + content: "\f294"; } + +.fa-sticker-mule:before { + content: "\f3f7"; } + +.fa-creative-commons-zero:before { + content: "\f4f3"; } + +.fa-hips:before { + content: "\f452"; } + +.fa-behance:before { + content: "\f1b4"; } + +.fa-reddit:before { + content: "\f1a1"; } + +.fa-discord:before { + content: "\f392"; } + +.fa-chrome:before { + content: "\f268"; } + +.fa-app-store-ios:before { + content: "\f370"; } + +.fa-cc-discover:before { + content: "\f1f2"; } + +.fa-wpbeginner:before { + content: "\f297"; } + +.fa-confluence:before { + content: "\f78d"; } + +.fa-mdb:before { + content: "\f8ca"; } + +.fa-dochub:before { + content: "\f394"; } + +.fa-accessible-icon:before { + content: "\f368"; } + +.fa-ebay:before { + content: "\f4f4"; } + +.fa-amazon:before { + content: "\f270"; } + +.fa-unsplash:before { + content: "\e07c"; } + +.fa-yarn:before { + content: "\f7e3"; } + +.fa-square-steam:before { + content: "\f1b7"; } + +.fa-steam-square:before { + content: "\f1b7"; } + +.fa-500px:before { + content: "\f26e"; } + +.fa-square-vimeo:before { + content: "\f194"; } + +.fa-vimeo-square:before { + content: "\f194"; } + +.fa-asymmetrik:before { + content: "\f372"; } + +.fa-font-awesome:before { + content: "\f2b4"; } + +.fa-font-awesome-flag:before { + content: "\f2b4"; } + +.fa-font-awesome-logo-full:before { + content: "\f2b4"; } + +.fa-gratipay:before { + content: "\f184"; } + +.fa-apple:before { + content: "\f179"; } + +.fa-hive:before { + content: "\e07f"; } + 
+.fa-gitkraken:before { + content: "\f3a6"; } + +.fa-keybase:before { + content: "\f4f5"; } + +.fa-apple-pay:before { + content: "\f415"; } + +.fa-padlet:before { + content: "\e4a0"; } + +.fa-amazon-pay:before { + content: "\f42c"; } + +.fa-square-github:before { + content: "\f092"; } + +.fa-github-square:before { + content: "\f092"; } + +.fa-stumbleupon:before { + content: "\f1a4"; } + +.fa-fedex:before { + content: "\f797"; } + +.fa-phoenix-framework:before { + content: "\f3dc"; } + +.fa-shopify:before { + content: "\e057"; } + +.fa-neos:before { + content: "\f612"; } + +.fa-square-threads:before { + content: "\e619"; } + +.fa-hackerrank:before { + content: "\f5f7"; } + +.fa-researchgate:before { + content: "\f4f8"; } + +.fa-swift:before { + content: "\f8e1"; } + +.fa-angular:before { + content: "\f420"; } + +.fa-speakap:before { + content: "\f3f3"; } + +.fa-angrycreative:before { + content: "\f36e"; } + +.fa-y-combinator:before { + content: "\f23b"; } + +.fa-empire:before { + content: "\f1d1"; } + +.fa-envira:before { + content: "\f299"; } + +.fa-square-gitlab:before { + content: "\e5ae"; } + +.fa-gitlab-square:before { + content: "\e5ae"; } + +.fa-studiovinari:before { + content: "\f3f8"; } + +.fa-pied-piper:before { + content: "\f2ae"; } + +.fa-wordpress:before { + content: "\f19a"; } + +.fa-product-hunt:before { + content: "\f288"; } + +.fa-firefox:before { + content: "\f269"; } + +.fa-linode:before { + content: "\f2b8"; } + +.fa-goodreads:before { + content: "\f3a8"; } + +.fa-square-odnoklassniki:before { + content: "\f264"; } + +.fa-odnoklassniki-square:before { + content: "\f264"; } + +.fa-jsfiddle:before { + content: "\f1cc"; } + +.fa-sith:before { + content: "\f512"; } + +.fa-themeisle:before { + content: "\f2b2"; } + +.fa-page4:before { + content: "\f3d7"; } + +.fa-hashnode:before { + content: "\e499"; } + +.fa-react:before { + content: "\f41b"; } + +.fa-cc-paypal:before { + content: "\f1f4"; } + +.fa-squarespace:before { + content: "\f5be"; } + 
+.fa-cc-stripe:before { + content: "\f1f5"; } + +.fa-creative-commons-share:before { + content: "\f4f2"; } + +.fa-bitcoin:before { + content: "\f379"; } + +.fa-keycdn:before { + content: "\f3ba"; } + +.fa-opera:before { + content: "\f26a"; } + +.fa-itch-io:before { + content: "\f83a"; } + +.fa-umbraco:before { + content: "\f8e8"; } + +.fa-galactic-senate:before { + content: "\f50d"; } + +.fa-ubuntu:before { + content: "\f7df"; } + +.fa-draft2digital:before { + content: "\f396"; } + +.fa-stripe:before { + content: "\f429"; } + +.fa-houzz:before { + content: "\f27c"; } + +.fa-gg:before { + content: "\f260"; } + +.fa-dhl:before { + content: "\f790"; } + +.fa-square-pinterest:before { + content: "\f0d3"; } + +.fa-pinterest-square:before { + content: "\f0d3"; } + +.fa-xing:before { + content: "\f168"; } + +.fa-blackberry:before { + content: "\f37b"; } + +.fa-creative-commons-pd:before { + content: "\f4ec"; } + +.fa-playstation:before { + content: "\f3df"; } + +.fa-quinscape:before { + content: "\f459"; } + +.fa-less:before { + content: "\f41d"; } + +.fa-blogger-b:before { + content: "\f37d"; } + +.fa-opencart:before { + content: "\f23d"; } + +.fa-vine:before { + content: "\f1ca"; } + +.fa-paypal:before { + content: "\f1ed"; } + +.fa-gitlab:before { + content: "\f296"; } + +.fa-typo3:before { + content: "\f42b"; } + +.fa-reddit-alien:before { + content: "\f281"; } + +.fa-yahoo:before { + content: "\f19e"; } + +.fa-dailymotion:before { + content: "\e052"; } + +.fa-affiliatetheme:before { + content: "\f36b"; } + +.fa-pied-piper-pp:before { + content: "\f1a7"; } + +.fa-bootstrap:before { + content: "\f836"; } + +.fa-odnoklassniki:before { + content: "\f263"; } + +.fa-nfc-symbol:before { + content: "\e531"; } + +.fa-ethereum:before { + content: "\f42e"; } + +.fa-speaker-deck:before { + content: "\f83c"; } + +.fa-creative-commons-nc-eu:before { + content: "\f4e9"; } + +.fa-patreon:before { + content: "\f3d9"; } + +.fa-avianex:before { + content: "\f374"; } + +.fa-ello:before 
{ + content: "\f5f1"; } + +.fa-gofore:before { + content: "\f3a7"; } + +.fa-bimobject:before { + content: "\f378"; } + +.fa-facebook-f:before { + content: "\f39e"; } + +.fa-square-google-plus:before { + content: "\f0d4"; } + +.fa-google-plus-square:before { + content: "\f0d4"; } + +.fa-mandalorian:before { + content: "\f50f"; } + +.fa-first-order-alt:before { + content: "\f50a"; } + +.fa-osi:before { + content: "\f41a"; } + +.fa-google-wallet:before { + content: "\f1ee"; } + +.fa-d-and-d-beyond:before { + content: "\f6ca"; } + +.fa-periscope:before { + content: "\f3da"; } + +.fa-fulcrum:before { + content: "\f50b"; } + +.fa-cloudscale:before { + content: "\f383"; } + +.fa-forumbee:before { + content: "\f211"; } + +.fa-mizuni:before { + content: "\f3cc"; } + +.fa-schlix:before { + content: "\f3ea"; } + +.fa-square-xing:before { + content: "\f169"; } + +.fa-xing-square:before { + content: "\f169"; } + +.fa-bandcamp:before { + content: "\f2d5"; } + +.fa-wpforms:before { + content: "\f298"; } + +.fa-cloudversify:before { + content: "\f385"; } + +.fa-usps:before { + content: "\f7e1"; } + +.fa-megaport:before { + content: "\f5a3"; } + +.fa-magento:before { + content: "\f3c4"; } + +.fa-spotify:before { + content: "\f1bc"; } + +.fa-optin-monster:before { + content: "\f23c"; } + +.fa-fly:before { + content: "\f417"; } + +.fa-aviato:before { + content: "\f421"; } + +.fa-itunes:before { + content: "\f3b4"; } + +.fa-cuttlefish:before { + content: "\f38c"; } + +.fa-blogger:before { + content: "\f37c"; } + +.fa-flickr:before { + content: "\f16e"; } + +.fa-viber:before { + content: "\f409"; } + +.fa-soundcloud:before { + content: "\f1be"; } + +.fa-digg:before { + content: "\f1a6"; } + +.fa-tencent-weibo:before { + content: "\f1d5"; } + +.fa-symfony:before { + content: "\f83d"; } + +.fa-maxcdn:before { + content: "\f136"; } + +.fa-etsy:before { + content: "\f2d7"; } + +.fa-facebook-messenger:before { + content: "\f39f"; } + +.fa-audible:before { + content: "\f373"; } + 
+.fa-think-peaks:before { + content: "\f731"; } + +.fa-bilibili:before { + content: "\e3d9"; } + +.fa-erlang:before { + content: "\f39d"; } + +.fa-x-twitter:before { + content: "\e61b"; } + +.fa-cotton-bureau:before { + content: "\f89e"; } + +.fa-dashcube:before { + content: "\f210"; } + +.fa-42-group:before { + content: "\e080"; } + +.fa-innosoft:before { + content: "\e080"; } + +.fa-stack-exchange:before { + content: "\f18d"; } + +.fa-elementor:before { + content: "\f430"; } + +.fa-square-pied-piper:before { + content: "\e01e"; } + +.fa-pied-piper-square:before { + content: "\e01e"; } + +.fa-creative-commons-nd:before { + content: "\f4eb"; } + +.fa-palfed:before { + content: "\f3d8"; } + +.fa-superpowers:before { + content: "\f2dd"; } + +.fa-resolving:before { + content: "\f3e7"; } + +.fa-xbox:before { + content: "\f412"; } + +.fa-searchengin:before { + content: "\f3eb"; } + +.fa-tiktok:before { + content: "\e07b"; } + +.fa-square-facebook:before { + content: "\f082"; } + +.fa-facebook-square:before { + content: "\f082"; } + +.fa-renren:before { + content: "\f18b"; } + +.fa-linux:before { + content: "\f17c"; } + +.fa-glide:before { + content: "\f2a5"; } + +.fa-linkedin:before { + content: "\f08c"; } + +.fa-hubspot:before { + content: "\f3b2"; } + +.fa-deploydog:before { + content: "\f38e"; } + +.fa-twitch:before { + content: "\f1e8"; } + +.fa-ravelry:before { + content: "\f2d9"; } + +.fa-mixer:before { + content: "\e056"; } + +.fa-square-lastfm:before { + content: "\f203"; } + +.fa-lastfm-square:before { + content: "\f203"; } + +.fa-vimeo:before { + content: "\f40a"; } + +.fa-mendeley:before { + content: "\f7b3"; } + +.fa-uniregistry:before { + content: "\f404"; } + +.fa-figma:before { + content: "\f799"; } + +.fa-creative-commons-remix:before { + content: "\f4ee"; } + +.fa-cc-amazon-pay:before { + content: "\f42d"; } + +.fa-dropbox:before { + content: "\f16b"; } + +.fa-instagram:before { + content: "\f16d"; } + +.fa-cmplid:before { + content: "\e360"; } + 
+.fa-facebook:before { + content: "\f09a"; } + +.fa-gripfire:before { + content: "\f3ac"; } + +.fa-jedi-order:before { + content: "\f50e"; } + +.fa-uikit:before { + content: "\f403"; } + +.fa-fort-awesome-alt:before { + content: "\f3a3"; } + +.fa-phabricator:before { + content: "\f3db"; } + +.fa-ussunnah:before { + content: "\f407"; } + +.fa-earlybirds:before { + content: "\f39a"; } + +.fa-trade-federation:before { + content: "\f513"; } + +.fa-autoprefixer:before { + content: "\f41c"; } + +.fa-whatsapp:before { + content: "\f232"; } + +.fa-slideshare:before { + content: "\f1e7"; } + +.fa-google-play:before { + content: "\f3ab"; } + +.fa-viadeo:before { + content: "\f2a9"; } + +.fa-line:before { + content: "\f3c0"; } + +.fa-google-drive:before { + content: "\f3aa"; } + +.fa-servicestack:before { + content: "\f3ec"; } + +.fa-simplybuilt:before { + content: "\f215"; } + +.fa-bitbucket:before { + content: "\f171"; } + +.fa-imdb:before { + content: "\f2d8"; } + +.fa-deezer:before { + content: "\e077"; } + +.fa-raspberry-pi:before { + content: "\f7bb"; } + +.fa-jira:before { + content: "\f7b1"; } + +.fa-docker:before { + content: "\f395"; } + +.fa-screenpal:before { + content: "\e570"; } + +.fa-bluetooth:before { + content: "\f293"; } + +.fa-gitter:before { + content: "\f426"; } + +.fa-d-and-d:before { + content: "\f38d"; } + +.fa-microblog:before { + content: "\e01a"; } + +.fa-cc-diners-club:before { + content: "\f24c"; } + +.fa-gg-circle:before { + content: "\f261"; } + +.fa-pied-piper-hat:before { + content: "\f4e5"; } + +.fa-kickstarter-k:before { + content: "\f3bc"; } + +.fa-yandex:before { + content: "\f413"; } + +.fa-readme:before { + content: "\f4d5"; } + +.fa-html5:before { + content: "\f13b"; } + +.fa-sellsy:before { + content: "\f213"; } + +.fa-sass:before { + content: "\f41e"; } + +.fa-wirsindhandwerk:before { + content: "\e2d0"; } + +.fa-wsh:before { + content: "\e2d0"; } + +.fa-buromobelexperte:before { + content: "\f37f"; } + +.fa-salesforce:before { + 
content: "\f83b"; } + +.fa-octopus-deploy:before { + content: "\e082"; } + +.fa-medapps:before { + content: "\f3c6"; } + +.fa-ns8:before { + content: "\f3d5"; } + +.fa-pinterest-p:before { + content: "\f231"; } + +.fa-apper:before { + content: "\f371"; } + +.fa-fort-awesome:before { + content: "\f286"; } + +.fa-waze:before { + content: "\f83f"; } + +.fa-cc-jcb:before { + content: "\f24b"; } + +.fa-snapchat:before { + content: "\f2ab"; } + +.fa-snapchat-ghost:before { + content: "\f2ab"; } + +.fa-fantasy-flight-games:before { + content: "\f6dc"; } + +.fa-rust:before { + content: "\e07a"; } + +.fa-wix:before { + content: "\f5cf"; } + +.fa-square-behance:before { + content: "\f1b5"; } + +.fa-behance-square:before { + content: "\f1b5"; } + +.fa-supple:before { + content: "\f3f9"; } + +.fa-rebel:before { + content: "\f1d0"; } + +.fa-css3:before { + content: "\f13c"; } + +.fa-staylinked:before { + content: "\f3f5"; } + +.fa-kaggle:before { + content: "\f5fa"; } + +.fa-space-awesome:before { + content: "\e5ac"; } + +.fa-deviantart:before { + content: "\f1bd"; } + +.fa-cpanel:before { + content: "\f388"; } + +.fa-goodreads-g:before { + content: "\f3a9"; } + +.fa-square-git:before { + content: "\f1d2"; } + +.fa-git-square:before { + content: "\f1d2"; } + +.fa-square-tumblr:before { + content: "\f174"; } + +.fa-tumblr-square:before { + content: "\f174"; } + +.fa-trello:before { + content: "\f181"; } + +.fa-creative-commons-nc-jp:before { + content: "\f4ea"; } + +.fa-get-pocket:before { + content: "\f265"; } + +.fa-perbyte:before { + content: "\e083"; } + +.fa-grunt:before { + content: "\f3ad"; } + +.fa-weebly:before { + content: "\f5cc"; } + +.fa-connectdevelop:before { + content: "\f20e"; } + +.fa-leanpub:before { + content: "\f212"; } + +.fa-black-tie:before { + content: "\f27e"; } + +.fa-themeco:before { + content: "\f5c6"; } + +.fa-python:before { + content: "\f3e2"; } + +.fa-android:before { + content: "\f17b"; } + +.fa-bots:before { + content: "\e340"; } + 
+.fa-free-code-camp:before { + content: "\f2c5"; } + +.fa-hornbill:before { + content: "\f592"; } + +.fa-js:before { + content: "\f3b8"; } + +.fa-ideal:before { + content: "\e013"; } + +.fa-git:before { + content: "\f1d3"; } + +.fa-dev:before { + content: "\f6cc"; } + +.fa-sketch:before { + content: "\f7c6"; } + +.fa-yandex-international:before { + content: "\f414"; } + +.fa-cc-amex:before { + content: "\f1f3"; } + +.fa-uber:before { + content: "\f402"; } + +.fa-github:before { + content: "\f09b"; } + +.fa-php:before { + content: "\f457"; } + +.fa-alipay:before { + content: "\f642"; } + +.fa-youtube:before { + content: "\f167"; } + +.fa-skyatlas:before { + content: "\f216"; } + +.fa-firefox-browser:before { + content: "\e007"; } + +.fa-replyd:before { + content: "\f3e6"; } + +.fa-suse:before { + content: "\f7d6"; } + +.fa-jenkins:before { + content: "\f3b6"; } + +.fa-twitter:before { + content: "\f099"; } + +.fa-rockrms:before { + content: "\f3e9"; } + +.fa-pinterest:before { + content: "\f0d2"; } + +.fa-buffer:before { + content: "\f837"; } + +.fa-npm:before { + content: "\f3d4"; } + +.fa-yammer:before { + content: "\f840"; } + +.fa-btc:before { + content: "\f15a"; } + +.fa-dribbble:before { + content: "\f17d"; } + +.fa-stumbleupon-circle:before { + content: "\f1a3"; } + +.fa-internet-explorer:before { + content: "\f26b"; } + +.fa-stubber:before { + content: "\e5c7"; } + +.fa-telegram:before { + content: "\f2c6"; } + +.fa-telegram-plane:before { + content: "\f2c6"; } + +.fa-old-republic:before { + content: "\f510"; } + +.fa-odysee:before { + content: "\e5c6"; } + +.fa-square-whatsapp:before { + content: "\f40c"; } + +.fa-whatsapp-square:before { + content: "\f40c"; } + +.fa-node-js:before { + content: "\f3d3"; } + +.fa-edge-legacy:before { + content: "\e078"; } + +.fa-slack:before { + content: "\f198"; } + +.fa-slack-hash:before { + content: "\f198"; } + +.fa-medrt:before { + content: "\f3c8"; } + +.fa-usb:before { + content: "\f287"; } + +.fa-tumblr:before { + 
content: "\f173"; } + +.fa-vaadin:before { + content: "\f408"; } + +.fa-quora:before { + content: "\f2c4"; } + +.fa-square-x-twitter:before { + content: "\e61a"; } + +.fa-reacteurope:before { + content: "\f75d"; } + +.fa-medium:before { + content: "\f23a"; } + +.fa-medium-m:before { + content: "\f23a"; } + +.fa-amilia:before { + content: "\f36d"; } + +.fa-mixcloud:before { + content: "\f289"; } + +.fa-flipboard:before { + content: "\f44d"; } + +.fa-viacoin:before { + content: "\f237"; } + +.fa-critical-role:before { + content: "\f6c9"; } + +.fa-sitrox:before { + content: "\e44a"; } + +.fa-discourse:before { + content: "\f393"; } + +.fa-joomla:before { + content: "\f1aa"; } + +.fa-mastodon:before { + content: "\f4f6"; } + +.fa-airbnb:before { + content: "\f834"; } + +.fa-wolf-pack-battalion:before { + content: "\f514"; } + +.fa-buy-n-large:before { + content: "\f8a6"; } + +.fa-gulp:before { + content: "\f3ae"; } + +.fa-creative-commons-sampling-plus:before { + content: "\f4f1"; } + +.fa-strava:before { + content: "\f428"; } + +.fa-ember:before { + content: "\f423"; } + +.fa-canadian-maple-leaf:before { + content: "\f785"; } + +.fa-teamspeak:before { + content: "\f4f9"; } + +.fa-pushed:before { + content: "\f3e1"; } + +.fa-wordpress-simple:before { + content: "\f411"; } + +.fa-nutritionix:before { + content: "\f3d6"; } + +.fa-wodu:before { + content: "\e088"; } + +.fa-google-pay:before { + content: "\e079"; } + +.fa-intercom:before { + content: "\f7af"; } + +.fa-zhihu:before { + content: "\f63f"; } + +.fa-korvue:before { + content: "\f42f"; } + +.fa-pix:before { + content: "\e43a"; } + +.fa-steam-symbol:before { + content: "\f3f6"; } +:root, :host { + --fa-style-family-classic: 'Font Awesome 6 Free'; + --fa-font-regular: normal 400 1em/1 'Font Awesome 6 Free'; } + +@font-face { + font-family: 'Font Awesome 6 Free'; + font-style: normal; + font-weight: 400; + font-display: block; + src: url("../webfonts/FontAwesome6Free-Regular-400.woff2") format("woff2"), 
url("../webfonts/FontAwesome6Free-Regular-400.ttf") format("truetype"); } + +.far, +.fa-regular { + font-weight: 400; } +:root, :host { + --fa-style-family-classic: 'Font Awesome 6 Free'; + --fa-font-solid: normal 900 1em/1 'Font Awesome 6 Free'; } + +@font-face { + font-family: 'Font Awesome 6 Free'; + font-style: normal; + font-weight: 900; + font-display: block; + src: url("../webfonts/FontAwesome6Free-Solid-900.woff2") format("woff2"), url("../webfonts/FontAwesome6Free-Solid-900.ttf") format("truetype"); } + +.fas, +.fa-solid { + font-weight: 900; } +@font-face { + font-family: 'Font Awesome 6 Brands'; + font-display: block; + font-weight: 400; + src: url("../webfonts/FontAwesome6Brands-Regular-400.woff2") format("woff2"), url("../webfonts/FontAwesome6Brands-Regular-400.ttf") format("truetype"); } + +@font-face { + font-family: 'Font Awesome 6 Free'; + font-display: block; + font-weight: 900; + src: url("../webfonts/FontAwesome6Free-Solid-900.woff2") format("woff2"), url("../webfonts/FontAwesome6Free-Solid-900.ttf") format("truetype"); } + +@font-face { + font-family: 'Font Awesome 6 Free'; + font-display: block; + font-weight: 400; + src: url("../webfonts/../webfonts/FontAwesome6Free-Regular-400.woff2") format("woff2"), url("../webfonts/FontAwesome6Free-Regular-400.ttf") format("truetype"); } +@font-face { + font-family: 'Font Awesome 6 Free'; + font-display: block; + src: url("../webfonts/FontAwesome6Free-Solid-900.woff2") format("woff2"), url("../webfonts/FontAwesome6Free-Solid-900.ttf") format("truetype"); } + +@font-face { + font-family: 'Font Awesome 6 Brands'; + font-display: block; + src: url("../webfonts/FontAwesome6Brands-Regular-400.woff2") format("woff2"), url("../webfonts/FontAwesome6Brands-Regular-400.ttf") format("truetype"); } + +@font-face { + font-family: 'Font Awesome 6 Free'; + font-display: block; + src: url("../webfonts/FontAwesome6Free-Regular-400.woff2") format("woff2"), url("../../webfonts/FontAwesome6Free-Regular-400.ttf") 
format("truetype"); + unicode-range: U+F003,U+F006,U+F014,U+F016-F017,U+F01A-F01B,U+F01D,U+F022,U+F03E,U+F044,U+F046,U+F05C-F05D,U+F06E,U+F070,U+F087-F088,U+F08A,U+F094,U+F096-F097,U+F09D,U+F0A0,U+F0A2,U+F0A4-F0A7,U+F0C5,U+F0C7,U+F0E5-F0E6,U+F0EB,U+F0F6-F0F8,U+F10C,U+F114-F115,U+F118-F11A,U+F11C-F11D,U+F133,U+F147,U+F14E,U+F150-F152,U+F185-F186,U+F18E,U+F190-F192,U+F196,U+F1C1-F1C9,U+F1D9,U+F1DB,U+F1E3,U+F1EA,U+F1F7,U+F1F9,U+F20A,U+F247-F248,U+F24A,U+F24D,U+F255-F25B,U+F25D,U+F271-F274,U+F278,U+F27B,U+F28C,U+F28E,U+F29C,U+F2B5,U+F2B7,U+F2BA,U+F2BC,U+F2BE,U+F2C0-F2C1,U+F2C3,U+F2D0,U+F2D2,U+F2D4,U+F2DC; } + diff --git a/_extensions/quarto-ext/fontawesome/assets/css/latex-fontsize.css b/_extensions/quarto-ext/fontawesome/assets/css/latex-fontsize.css new file mode 100644 index 0000000..45545ec --- /dev/null +++ b/_extensions/quarto-ext/fontawesome/assets/css/latex-fontsize.css @@ -0,0 +1,30 @@ +.fa-tiny { + font-size: 0.5em; +} +.fa-scriptsize { + font-size: 0.7em; +} +.fa-footnotesize { + font-size: 0.8em; +} +.fa-small { + font-size: 0.9em; +} +.fa-normalsize { + font-size: 1em; +} +.fa-large { + font-size: 1.2em; +} +.fa-Large { + font-size: 1.5em; +} +.fa-LARGE { + font-size: 1.75em; +} +.fa-huge { + font-size: 2em; +} +.fa-Huge { + font-size: 2.5em; +} diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Brands-Regular-400.ttf b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Brands-Regular-400.ttf new file mode 100644 index 0000000..34a1436 Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Brands-Regular-400.ttf differ diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Brands-Regular-400.woff2 b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Brands-Regular-400.woff2 new file mode 100644 index 0000000..d1a319f Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Brands-Regular-400.woff2 differ diff --git 
a/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Regular-400.ttf b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Regular-400.ttf new file mode 100644 index 0000000..d0aeac9 Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Regular-400.ttf differ diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Regular-400.woff2 b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Regular-400.woff2 new file mode 100644 index 0000000..f3918d2 Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Regular-400.woff2 differ diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Solid-900.ttf b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Solid-900.ttf new file mode 100644 index 0000000..deab676 Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Solid-900.ttf differ diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Solid-900.woff2 b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Solid-900.woff2 new file mode 100644 index 0000000..53c1987 Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/FontAwesome6Free-Solid-900.woff2 differ diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-brands-400.ttf b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-brands-400.ttf new file mode 100644 index 0000000..430a02e Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-brands-400.ttf differ diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-brands-400.woff2 b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-brands-400.woff2 new file mode 100644 index 0000000..4d904aa Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-brands-400.woff2 differ diff --git 
a/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-regular-400.ttf b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-regular-400.ttf new file mode 100644 index 0000000..23e3feb Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-regular-400.ttf differ diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-regular-400.woff2 b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-regular-400.woff2 new file mode 100644 index 0000000..80e3b12 Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-regular-400.woff2 differ diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-solid-900.ttf b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-solid-900.ttf new file mode 100644 index 0000000..da90824 Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-solid-900.ttf differ diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-solid-900.woff2 b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-solid-900.woff2 new file mode 100644 index 0000000..360ba11 Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-solid-900.woff2 differ diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-v4compatibility.ttf b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-v4compatibility.ttf new file mode 100644 index 0000000..e9545ed Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-v4compatibility.ttf differ diff --git a/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-v4compatibility.woff2 b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-v4compatibility.woff2 new file mode 100644 index 0000000..db5b0b9 Binary files /dev/null and b/_extensions/quarto-ext/fontawesome/assets/webfonts/fa-v4compatibility.woff2 differ diff --git a/_extensions/quarto-ext/fontawesome/fontawesome.lua b/_extensions/quarto-ext/fontawesome/fontawesome.lua new file mode 100644 index 
0000000..ff64dca --- /dev/null +++ b/_extensions/quarto-ext/fontawesome/fontawesome.lua @@ -0,0 +1,84 @@ +local function ensureLatexDeps() + quarto.doc.use_latex_package("fontawesome5") +end + +local function ensureHtmlDeps() + quarto.doc.add_html_dependency({ + name = 'fontawesome6', + version = '0.1.0', + stylesheets = {'assets/css/all.css', 'assets/css/latex-fontsize.css'} + }) +end + +local function isEmpty(s) + return s == nil or s == '' +end + +local function isValidSize(size) + local validSizes = { + "tiny", + "scriptsize", + "footnotesize", + "small", + "normalsize", + "large", + "Large", + "LARGE", + "huge", + "Huge" + } + for _, v in ipairs(validSizes) do + if v == size then + return size + end + end + return "" +end + +return { + ["fa"] = function(args, kwargs) + + local group = "solid" + local icon = pandoc.utils.stringify(args[1]) + if #args > 1 then + group = icon + icon = pandoc.utils.stringify(args[2]) + end + + local title = pandoc.utils.stringify(kwargs["title"]) + if not isEmpty(title) then + title = " title=\"" .. title .. "\"" + end + + local label = pandoc.utils.stringify(kwargs["label"]) + if isEmpty(label) then + label = " aria-label=\"" .. icon .. "\"" + else + label = " aria-label=\"" .. label .. "\"" + end + + local size = pandoc.utils.stringify(kwargs["size"]) + + -- detect html (excluding epub which won't handle fa) + if quarto.doc.is_format("html:js") then + ensureHtmlDeps() + if not isEmpty(size) then + size = " fa-" .. size + end + return pandoc.RawInline( + 'html', + "" + ) + -- detect pdf / beamer / latex / etc + elseif quarto.doc.is_format("pdf") then + ensureLatexDeps() + if isEmpty(isValidSize(size)) then + return pandoc.RawInline('tex', "\\faIcon{" .. icon .. "}") + else + return pandoc.RawInline('tex', "{\\" .. size .. "\\faIcon{" .. icon .. 
"}}") + end + else + return pandoc.Null() + end + end +} diff --git a/_extensions/schochastics/academicons/_extension.yml b/_extensions/schochastics/academicons/_extension.yml new file mode 100644 index 0000000..f643e0b --- /dev/null +++ b/_extensions/schochastics/academicons/_extension.yml @@ -0,0 +1,6 @@ +title: Support for academicons +author: David Schoch +version: 0.3.0 +contributes: + shortcodes: + - academicons.lua diff --git a/_extensions/schochastics/academicons/academicons.lua b/_extensions/schochastics/academicons/academicons.lua new file mode 100644 index 0000000..9ba52fb --- /dev/null +++ b/_extensions/schochastics/academicons/academicons.lua @@ -0,0 +1,82 @@ +-- function ensureLatexDeps() +-- quarto.doc.useLatexPackage("academicons") +-- end + +local function ensureHtmlDeps() + quarto.doc.addHtmlDependency({ + name = "academicons", + version = "1.9.2", + stylesheets = { "assets/css/all.css", "assets/css/size.css" } + }) +end + +local function isEmpty(s) + return s == nil or s == '' +end + +local function isValidSize(size) + local validSizes = { + "tiny", "scriptsize", "footnotesize", "small", "normalsize", + "large", "Large", "LARGE", "huge", "Huge", + "1x", "2x", "3x", "4x", "5x", "6x", "7x", "8x", "9x", "10x", + "2xs", "xs", "sm", "lg", "xl", "2xl" + } + for _, v in ipairs(validSizes) do + if v == size then + return " ai-" .. size + end + end + return "" +end + +return { + ["ai"] = function(args, kwargs) + local group = "" + local icon = pandoc.utils.stringify(args[1]) + if #args > 1 then + group = icon + icon = pandoc.utils.stringify(args[2]) + end + + local size = isValidSize(pandoc.utils.stringify(kwargs["size"])) + local color = pandoc.utils.stringify(kwargs["color"]) + if not isEmpty(color) then + color = " style=\"color:" .. color .. "\"" + --else + -- color = " style=\"color:" .. "black" .. "\"" + end + + local title = pandoc.utils.stringify(kwargs["title"]) + if not isEmpty(title) then + title = " title=\"" .. title .. 
"\"" + end + + -- detect html (excluding epub) + if quarto.doc.isFormat("html:js") then + ensureHtmlDeps() + if isEmpty(size) then + local csize = pandoc.utils.stringify(kwargs["size"]) + if (isEmpty(csize)) then + csize = "" + else + csize = " style=\"font-size:" .. csize .. "\"" + end + return pandoc.RawInline( + 'html', + "" + ) + else + return pandoc.RawInline( + 'html', + "" + ) + end + -- detect pdf / beamer / latex / etc + -- elseif quarto.doc.isFormat("pdf") then + -- ensureLatexDeps() + -- return pandoc.RawInline('tex', "\\aiIcon{" .. icon .. "}") + else + return pandoc.Null() + end + end +} diff --git a/_extensions/schochastics/academicons/assets/css/all.css b/_extensions/schochastics/academicons/assets/css/all.css new file mode 100755 index 0000000..73114b0 --- /dev/null +++ b/_extensions/schochastics/academicons/assets/css/all.css @@ -0,0 +1,738 @@ +/* + * Academicons 1.9.4 by James Walsh (https://github.com/jpswalsh) and Katja Bercic (https://github.com/katjabercic) + * Fonts generated using FontForge - https://fontforge.org + * Square icons designed to be used alongside Font Awesome square icons - https://fortawesome.github.io/Font-Awesome/ + * Licenses - Font: SIL OFL 1.1, CSS: MIT License + */ +@font-face { + font-family: 'Academicons'; + font-style: normal; + font-weight: 400; + font-display: block; + src: url('../webfonts/academicons.eot'); + src: url('../webfonts/academicons.eot') format('embedded-opentype'), + url('../webfonts/academicons.ttf') format('truetype'), + url('../webfonts/academicons.woff') format('woff'), + url('../webfonts/academicons.svg') format('svg'); +} + +.ai { + font-family: 'Academicons'; + font-weight: 400; + -moz-osx-font-smoothing: grayscale; + -webkit-font-smoothing: antialiased; + display: inline-block; + font-style: normal; + font-variant: normal; + text-rendering: auto; + line-height: 1; +} + +.ai-academia:before { + content: "\e9af"; +} + +.ai-academia-square:before { + content: "\e93d"; +} + +.ai-acclaim:before { + 
content: "\e92e"; +} + +.ai-acclaim-square:before { + content: "\e93a"; +} + +.ai-acm:before { + content: "\e93c"; +} + +.ai-acm-square:before { + content: "\e95d"; +} + +.ai-acmdl:before { + content: "\e96a"; +} + +.ai-acmdl-square:before { + content: "\e9d3"; +} + +.ai-ads:before { + content: "\e9cb"; +} + +.ai-ads-square:before { + content: "\e94a"; +} + +.ai-africarxiv:before { + content: "\e91b"; +} + +.ai-africarxiv-square:before { + content: "\e90b"; +} + +.ai-archive:before { + content: "\e955"; +} + +.ai-archive-square:before { + content: "\e956"; +} + +.ai-arxiv:before { + content: "\e974"; +} + +.ai-arxiv-square:before { + content: "\e9a6"; +} + +.ai-biorxiv:before { + content: "\e9a2"; +} + +.ai-biorxiv-square:before { + content: "\e98b"; +} + +.ai-ceur:before { + content: "\e96d"; +} + +.ai-ceur-square:before { + content: "\e92f"; +} + +.ai-ciencia-vitae:before { + content: "\e912"; +} + +.ai-ciencia-vitae-square:before { + content: "\e913"; +} + +.ai-clarivate:before { + content: "\e924"; +} + +.ai-clarivate-square:before { + content: "\e925"; +} + +.ai-closed-access:before { + content: "\e942"; +} + +.ai-closed-access-square:before { + content: "\e943"; +} + +.ai-conversation:before { + content: "\e94c"; +} + +.ai-conversation-square:before { + content: "\e915"; +} + +.ai-coursera:before { + content: "\e95f"; +} + +.ai-coursera-square:before { + content: "\e97f"; +} + +.ai-crossref:before { + content: "\e918"; +} + +.ai-crossref-square:before { + content: "\e919"; +} + +.ai-cv:before { + content: "\e9a5"; +} + +.ai-cv-square:before { + content: "\e90a"; +} + +.ai-datacite:before { + content: "\e91c"; +} + +.ai-datacite-square:before { + content: "\e91d"; +} + +.ai-dataverse:before { + content: "\e9f7"; +} + +.ai-dataverse-square:before { + content: "\e9e4"; +} + +.ai-dblp:before { + content: "\e94f"; +} + +.ai-dblp-square:before { + content: "\e93f"; +} + +.ai-depsy:before { + content: "\e97a"; +} + +.ai-depsy-square:before { + content: "\e94b"; +} + 
+.ai-doi:before { + content: "\e97e"; +} + +.ai-doi-square:before { + content: "\e98f"; +} + +.ai-dryad:before { + content: "\e97c"; +} + +.ai-dryad-square:before { + content: "\e98c"; +} + +.ai-elsevier:before { + content: "\e961"; +} + +.ai-elsevier-square:before { + content: "\e910"; +} + +.ai-figshare:before { + content: "\e981"; +} + +.ai-figshare-square:before { + content: "\e9e7"; +} + +.ai-google-scholar:before { + content: "\e9d4"; +} + +.ai-google-scholar-square:before { + content: "\e9f9"; +} + +.ai-hal:before { + content: "\e92c"; +} + +.ai-hal-square:before { + content: "\e92d"; +} + +.ai-hypothesis:before { + content: "\e95a"; +} + +.ai-hypothesis-square:before { + content: "\e95b"; +} + +.ai-ideas-repec:before { + content: "\e9ed"; +} + +.ai-ideas-repec-square:before { + content: "\e9f8"; +} + +.ai-ieee:before { + content: "\e929"; +} + +.ai-ieee-square:before { + content: "\e9b9"; +} + +.ai-impactstory:before { + content: "\e9cf"; +} + +.ai-impactstory-square:before { + content: "\e9aa"; +} + +.ai-inaturalist:before { + content: "\e900"; +} + +.ai-inaturalist-square:before { + content: "\e901"; +} + +.ai-inpn:before { + content: "\e902"; +} + +.ai-inpn-square:before { + content: "\e903"; +} + +.ai-inspire:before { + content: "\e9e9"; +} + +.ai-inspire-square:before { + content: "\e9fe"; +} + +.ai-isidore:before { + content: "\e936"; +} + +.ai-isidore-square:before { + content: "\e954"; +} + +.ai-isni:before { + content: "\e957"; +} + +.ai-isni-square:before { + content: "\e958"; +} + +.ai-jstor:before { + content: "\e938"; +} + +.ai-jstor-square:before { + content: "\e944"; +} + +.ai-lattes:before { + content: "\e9b3"; +} + +.ai-lattes-square:before { + content: "\e99c"; +} + +.ai-mathoverflow:before { + content: "\e9f6"; +} + +.ai-mathoverflow-square:before { + content: "\e97b"; +} + +.ai-mendeley:before { + content: "\e9f0"; +} + +.ai-mendeley-square:before { + content: "\e9f3"; +} + +.ai-moodle:before { + content: "\e907"; +} + 
+.ai-moodle-square:before { + content: "\e908"; +} + +.ai-mtmt:before { + content: "\e950"; +} + +.ai-mtmt-square:before { + content: "\e951"; +} + +.ai-nakala:before { + content: "\e940"; +} + +.ai-nakala-square:before { + content: "\e941"; +} + +.ai-obp:before { + content: "\e92a"; +} + +.ai-obp-square:before { + content: "\e92b"; +} + +.ai-open-access:before { + content: "\e939"; +} + +.ai-open-access-square:before { + content: "\e9f4"; +} + +.ai-open-data:before { + content: "\e966"; +} + +.ai-open-data-square:before { + content: "\e967"; +} + +.ai-open-materials:before { + content: "\e968"; +} + +.ai-open-materials-square:before { + content: "\e969"; +} + +.ai-openedition:before { + content: "\e946"; +} + +.ai-openedition-square:before { + content: "\e947"; +} + +.ai-orcid:before { + content: "\e9d9"; +} + +.ai-orcid-square:before { + content: "\e9c3"; +} + +.ai-osf:before { + content: "\e9ef"; +} + +.ai-osf-square:before { + content: "\e931"; +} + +.ai-overleaf:before { + content: "\e914"; +} + +.ai-overleaf-square:before { + content: "\e98d"; +} + +.ai-philpapers:before { + content: "\e98a"; +} + +.ai-philpapers-square:before { + content: "\e96f"; +} + +.ai-piazza:before { + content: "\e99a"; +} + +.ai-piazza-square:before { + content: "\e90c"; +} + +.ai-preregistered:before { + content: "\e906"; +} + +.ai-preregistered-square:before { + content: "\e96b"; +} + +.ai-protocols:before { + content: "\e952"; +} + +.ai-protocols-square:before { + content: "\e953"; +} + +.ai-psyarxiv:before { + content: "\e90e"; +} + +.ai-psyarxiv-square:before { + content: "\e90f"; +} + +.ai-publons:before { + content: "\e937"; +} + +.ai-publons-square:before { + content: "\e94e"; +} + +.ai-pubmed:before { + content: "\e99f"; +} + +.ai-pubmed-square:before { + content: "\e97d"; +} + +.ai-pubpeer:before { + content: "\e922"; +} + +.ai-pubpeer-square:before { + content: "\e923"; +} + +.ai-researcherid:before { + content: "\e91a"; +} + +.ai-researcherid-square:before { + content: 
"\e95c"; +} + +.ai-researchgate:before { + content: "\e95e"; +} + +.ai-researchgate-square:before { + content: "\e99e"; +} + +.ai-ror:before { + content: "\e948"; +} + +.ai-ror-square:before { + content: "\e949"; +} + +.ai-sci-hub:before { + content: "\e959"; +} + +.ai-sci-hub-square:before { + content: "\e905"; +} + +.ai-scirate:before { + content: "\e98e"; +} + +.ai-scirate-square:before { + content: "\e99d"; +} + +.ai-scopus:before { + content: "\e91e"; +} + +.ai-scopus-square:before { + content: "\e91f"; +} + +.ai-semantic-scholar:before { + content: "\e96e"; +} + +.ai-semantic-scholar-square:before { + content: "\e96c"; +} + +.ai-springer:before { + content: "\e928"; +} + +.ai-springer-square:before { + content: "\e99b"; +} + +.ai-ssrn:before { + content: "\e916"; +} + +.ai-ssrn-square:before { + content: "\e917"; +} + +.ai-stackoverflow:before { + content: "\e920"; +} + +.ai-stackoverflow-square:before { + content: "\e921"; +} + +.ai-viaf:before { + content: "\e933"; +} + +.ai-viaf-square:before { + content: "\e934"; +} + +.ai-wiley:before { + content: "\e926"; +} + +.ai-wiley-square:before { + content: "\e927"; +} + +.ai-zenodo:before { + content: "\e911"; +} + +.ai-zotero:before { + content: "\e962"; +} + +.ai-zotero-square:before { + content: "\e932"; +} + +/* Duplication of the FontAwesome style classes using 'ai' in place of 'fa'. 
*/ +.ai-lg { + font-size: 1.33333em; + line-height: 0.75em; + vertical-align: -.0667em; +} + +.ai-xs { + font-size: .75em; +} + +.ai-sm { + font-size: .875em; +} + +.ai-1x { + font-size: 1em; +} + +.ai-2x { + font-size: 2em; +} + +.ai-3x { + font-size: 3em; +} + +.ai-4x { + font-size: 4em; +} + +.ai-5x { + font-size: 5em; +} + +.ai-6x { + font-size: 6em; +} + +.ai-7x { + font-size: 7em; +} + +.ai-8x { + font-size: 8em; +} + +.ai-9x { + font-size: 9em; +} + +.ai-10x { + font-size: 10em; +} + +.ai-fw { + text-align: center; + width: 1.25em; +} + +.ai-ul { + list-style-type: none; + margin-left: 2.5em; + padding-left: 0; +} + +.ai-ul>li { + position: relative; +} + +.ai-li { + left: -2em; + position: absolute; + text-align: center; + width: 2em; + line-height: inherit; +} + +.ai-border { + border: solid 0.08em #eee; + border-radius: .1em; + padding: .2em .25em .15em; +} + +.ai-pull-left { + float: left; +} + +.ai-pull-right { + float: right; +} + +.ai.ai-pull-left { + margin-right: .3em; +} + +.ai.ai-pull-right { + margin-right: .3em; +} + +.ai-stack { + display: inline-block; + height: 2em; + line-height: 2em; + position: relative; + vertical-align: middle; + width: 2.5em; +} + +.ai-stack-1x, +.ai-stack-2x { + left: 0; + position: absolute; + text-align: center; + width: 100%; +} + +.ai-stack-1x { + line-height: inherit; +} + +.ai-stack-2x { + font-size: 2em; +} + +.ai-inverse { + color: #fff; +} \ No newline at end of file diff --git a/_extensions/schochastics/academicons/assets/css/size.css b/_extensions/schochastics/academicons/assets/css/size.css new file mode 100644 index 0000000..590965f --- /dev/null +++ b/_extensions/schochastics/academicons/assets/css/size.css @@ -0,0 +1,115 @@ +.ai-tiny { + font-size: 0.5em; +} + +.ai-scriptsize { + font-size: 0.7em; +} + +.ai-footnotesize { + font-size: 0.8em; +} + +.ai-small { + font-size: 0.9em; +} + +.ai-normalsize { + font-size: 1em; +} + +.ai-large { + font-size: 1.2em; +} + +.ai-Large { + font-size: 1.5em; +} + 
+.ai-LARGE { + font-size: 1.75em; +} + +.ai-huge { + font-size: 2em; +} + +.ai-Huge { + font-size: 2.5em; +} + +.ai-1x { + font-size: 1em; +} + +.ai-2x { + font-size: 2em; +} + +.ai-3x { + font-size: 3em; +} + +.ai-4x { + font-size: 4em; +} + +.ai-5x { + font-size: 5em; +} + +.ai-6x { + font-size: 6em; +} + +.ai-7x { + font-size: 7em; +} + +.ai-8x { + font-size: 8em; +} + +.ai-9x { + font-size: 9em; +} + +.ai-10x { + font-size: 10em; +} + +.ai-2xs { + font-size: 0.625em; + line-height: 0.1em; + vertical-align: 0.225em; +} + +.ai-xs { + font-size: 0.75em; + line-height: 0.08333em; + vertical-align: 0.125em; +} + +.ai-sm { + font-size: 0.875em; + line-height: 0.07143em; + vertical-align: 0.05357em; +} + +.ai-lg { + font-size: 1.25em; + line-height: 0.05em; + vertical-align: -0.075em; +} + +.ai-xl { + font-size: 1.5em; + line-height: 0.04167em; + vertical-align: -0.125em; +} + +.ai-2xl { + font-size: 2em; + line-height: 0.03125em; + vertical-align: -0.1875em; +} diff --git a/_extensions/schochastics/academicons/assets/webfonts/academicons.eot b/_extensions/schochastics/academicons/assets/webfonts/academicons.eot new file mode 100644 index 0000000..37ae5e3 Binary files /dev/null and b/_extensions/schochastics/academicons/assets/webfonts/academicons.eot differ diff --git a/_extensions/schochastics/academicons/assets/webfonts/academicons.svg b/_extensions/schochastics/academicons/assets/webfonts/academicons.svg new file mode 100644 index 0000000..d72201d --- /dev/null +++ b/_extensions/schochastics/academicons/assets/webfonts/academicons.svg @@ -0,0 +1,1859 @@ + + + + + +Created by FontForge 20190801 at Thu Jun 1 11:28:32 2023 + By Nicolas + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/_extensions/schochastics/academicons/assets/webfonts/academicons.ttf b/_extensions/schochastics/academicons/assets/webfonts/academicons.ttf new file mode 100644 index 0000000..ca7b48c Binary files /dev/null and b/_extensions/schochastics/academicons/assets/webfonts/academicons.ttf differ diff --git a/_extensions/schochastics/academicons/assets/webfonts/academicons.woff b/_extensions/schochastics/academicons/assets/webfonts/academicons.woff new file mode 100644 index 0000000..9d631dd Binary files /dev/null and b/_extensions/schochastics/academicons/assets/webfonts/academicons.woff differ diff --git a/_freeze/index/execute-results/html.json b/_freeze/index/execute-results/html.json index 5babd09..c1b64ee 100644 --- a/_freeze/index/execute-results/html.json +++ b/_freeze/index/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "82ce7bf9669d6fa76ae9995f703ec7c9", + "hash": "a9469b81d9193a48f9ce1b5b4f6df831", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Same data, different analysts: variation in effect sizes due to analytical decisions in ecology and evolutionary biology.\"\nabstract: |\n Although variation in effect sizes and predicted values among studies of similar phenomena is inevitable, such variation far exceeds what might be produced by sampling error alone. One possible explanation for variation among results is differences among researchers in the decisions they make regarding statistical analyses. A growing array of studies has explored this analytical variability in different fields and has found substantial variability among results despite analysts having the same data and research question. Many of these studies have been in the social sciences, but one small ‘many analyst’ study found similar variability in ecology. 
We expanded the scope of this prior work by implementing a large-scale empirical exploration of the variation in effect sizes and model predictions generated by the analytical decisions of different researchers in ecology and evolutionary biology. We used two unpublished datasets, one from evolutionary ecology (blue tit, *Cyanistes caeruleus*, to compare sibling number and nestling growth) and one from conservation ecology (*Eucalyptus*, to compare grass cover and tree seedling recruitment), and the project leaders recruited 174 analyst teams, comprising 246 analysts, to investigate the answers to prespecified research questions. Analyses conducted by these teams yielded 141 usable effects (compatible with our meta-analyses and all necessary information provided) for the blue tit dataset, and 85 usable effects for the *Eucalyptus* dataset. We found substantial heterogeneity among results for both datasets, although the patterns of variation differed between them. For the blue tit analyses, the average effect was convincingly negative, with less growth for nestlings living with more siblings, but there was near continuous variation in effect size from large negative effects to effects near zero, and even effects crossing the traditional threshold of statistical significance in the opposite direction. In contrast, the average relationship between grass cover and *Eucalyptus* seedling number was only slightly negative and not convincingly different from zero, and most effects ranged from weakly negative to weakly positive, with about a third of effects crossing the traditional threshold of significance in one direction or the other. However, there were also several striking outliers in the *Eucalyptus* dataset, with effects far from zero. 
For both datasets, we found substantial variation in the variable selection and random effects structures among analyses, as well as in the ratings of the analytical methods by peer reviewers, but we found no strong relationship between any of these and deviation from the meta-analytic mean. In other words, analyses with results that were far from the mean were no more or less likely to have dissimilar variable sets, use random effects in their models, or receive poor peer reviews than those analyses that found results that were close to the mean. The existence of substantial variability among analysis outcomes raises important questions about how ecologists and evolutionary biologists should interpret published results, and how they should conduct analyses in the future. \nauthors:\n - name: \"Elliot Gould\"\n orcid: \"0000-0002-6585-538X\"\n affiliation:\n - name: The University of Melbourne\n department: School of Agriculture Food and Ecosystem Sciences\n roles:\n - Software\n - Investigation\n - Manuscript Writing\n - name: \"Hannah S. Fraser\"\n orcid: \"0000-0003-2443-4463\"\n affiliation:\n - name: The University of Melbourne\n department: School of Historical and Philosophical Studies\n roles:\n - Software\n - Investigation\n - Manuscript Writing\n - name: \"Timothy H. Parker\"\n email: \"parkerth@whitman.edu\"\n orcid: \"0000-0003-2995-5284\"\n attributes:\n corresponding: true\n roles:\n - Investigation\n - Manuscript Writing\n affiliation:\n - name: Whitman College\n department: Department of Biology\n - name: \"Shinichi Nakagawa\"\n orcid: \"0000-0002-7765-5182\"\n affiliation:\n - name: The University of New South Wales\n department: School of Biological, Earth & Environmental Sciences\n roles:\n - Software\n - Investigation\n - Manuscript Writing\n - name: \"Simon C. Griffith\"\n orcid: \"0000-0001-7612-4999\"\n affiliation:\n - name: Macquarie University\n department: chool of Natural Sciences\n roles:\n - Manuscript Writing\n - name: \"Peter A. 
Vesk\"\n orcid: \"0000-0003-2008-7062\"\n affiliation:\n - name: The University of Melbourne\n department: School of Biological, Earth & Environmental Sciences\n roles:\n - Manuscript Writing\n - name: \"Fiona Fidler\"\n orcid: \"0000-0002-2700-2562\"\n affiliation:\n - name: The University of Melbourne\n department: School of Historical and Philosophical Studies\n roles:\n - Manuscript Writing\n - name: \"Daniel G. Hamilton\"\n orcid: \"0000-0001-8104-474X\"\n affiliation:\n - name: \"The University of Melbourne\"\n department: \"School of BioSciences\"\n - name: \"Robin N Abbey-Lee\" \n affiliation: \n - name: \"Länsstyrelsen Östergötland\" \n department: \"\"\n - name: \"Jessica K. Abbott\" \n orcid: \"0000-0002-8743-2089\" \n affiliation: \n - name: \"Lund University\" \n department: \"Biology Department\"\n - name: \"Luis A. Aguirre\" \n orcid: \"0000-0001-9796-9755\" \n affiliation: \n - name: \"University of Massachusetts\" \n department: \"Department of Biology\"\n - name: \"Carles Alcaraz\" \n orcid: \"0000-0002-2147-4796\" \n affiliation: \n - name: \"IRTA\" \n department: \"Marine and Continental Waters\"\n - name: \"Irith Aloni\"\n orcid: \"0000-0002-7777-3365\"\n affiliation:\n - name: \"Ben Gurion University of the Negev\"\n department: \"Dept. of Life Sciences\"\n - name: \"Drew Altschul\" \n orcid: \"0000-0001-7053-4209\" \n affiliation: \n - name: \"The University of Edinburgh\" \n department: \"Department of Psychology\"\n - name: \"Kunal Arekar\" \n orcid: \"0000-0003-1060-5911\" \n affiliation: \n - name: \"Indian Institute of Science\" \n department: \"Centre for Ecological Sciences\"\n - name: \"Jeff W. 
Atkins\" \n orcid: \"0000-0002-2295-3131\" \n affiliation: \n - name: \"USDA Forest Service\"\n department: \"Southern Research Station\"\n - name: \"Joe Atkinson\" \n orcid: \"0000-0001-9232-4421\" \n affiliation: \n - name: \"Aarhus University\" \n department: \"Center for Ecological Dynamics in a Novel Biosphere (ECONOVO), Department of Biology\"\n - name: \"Christopher M. Baker\"\n orcid: \"0000-0001-9449-3632\"\n affiliation:\n - name: \"The University of Melbourne\"\n department: \"School of Mathematics and Statistics\"\n - name: \"Meghan Barrett\" \n affiliation: \n - name: \"Indiana University Purdue University Indianapolis\" \n department: \"Biology\"\n - name: \"Kristian Bell\"\n orcid: \"0000-0002-1857-6257\"\n affiliation: \n - name: \"Deakin University\"\n department: \"School of Life and Environmental Sciences\"\n - name: \"Suleiman Kehinde Bello\" \n orcid: \"0000-0001-6718-9256\" \n affiliation: \n - name: \"King Abdulaziz University\" \n department: \"Department of Arid Land Agriculture\"\n - name: \"Iván Beltrán\"\n orcid: \"0000-0003-4439-8391\"\n affiliation:\n - name: \"Macquarie University\"\n department: \"Department of Biological Sciences\"\n - name: \"Bernd J. Berauer\" \n orcid: \"0000-0002-9472-1532\" \n affiliation: \n - name: \"University of Hohenheim, Institute of Landscape and Plant Ecology\" \n department: \"Department of Plant Ecology\"\n - name: \"Michael Grant Bertram\" \n orcid: \"0000-0001-5320-8444\" \n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Wildlife, Fish, and Environmental Studies\"\n - name: \"Peter D. 
Billman\" \n orcid: \"0000-0002-4072-4965\" \n affiliation: \n - name: \"University of Connecticut\" \n department: \"Department of Ecology and Evolutionary Biology\"\n - name: \"Charlie K Blake\" \n orcid: \"0000-0003-4649-3472\" \n affiliation: \n - name: \"Southern Illinois University Edwardsville\" \n department: \"STEM Center\"\n - name: \"Shannon Blake\" \n affiliation: \n - name: \"University of Guelph\" \n department: \"\"\n - name: \"Louis Bliard\" \n orcid: \"0000-0002-2349-8513\" \n affiliation: \n - name: \"University of Zurich\" \n department: \"Department of Evolutionary Biology and Environmental Studies\"\n - name: \"Andrea Bonisoli-Alquati\" \n orcid: \"0000-0002-9255-7556\" \n affiliation: \n - name: \"California State Polytechnic University, Pomona\" \n department: \"Department of Biological Sciences\"\n - name: \"Timothée Bonnet\" \n orcid: \"0000-0001-7186-5288\" \n affiliation: \n - name: \"UMR 7372 Université de la Rochelle - Centre National de la Recherche Scientifique\" \n department: \"Centre d'Études Biologiques de Chizé\"\n - name: \"Camille Nina Marion Bordes\" \n orcid: \"0000-0002-3561-2811\" \n affiliation: \n - name: \"Bar Ilan University\" \n department: \"Faculty of Life Sciences\"\n - name: \"Aneesh P. H. Bose\" \n orcid: \"0000-0001-5716-0097\" \n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Wildlife, Fish, and Environmental Studies\"\n - name: \"Thomas Botterill-James\" \n orcid: \"0000-0002-6186-5871\" \n affiliation: \n - name: \"University of Tasmania\" \n department: \"School of Natural Sciences\"\n - name: \"Melissa Anna Boyd\" \n orcid: \"0000-0003-2681-8567\" \n affiliation: \n - name: \"Whitebark Institute\" \n department: \"\"\n - name: \"Sarah A. 
Boyle\" \n orcid: \"0000-0001-9498-6787\" \n affiliation: \n - name: \"Rhodes College\" \n department: \"Department of Biology\"\n - name: \"Tom Bradfer-Lawrence\" \n orcid: \"0000-0001-6045-4360\" \n affiliation: \n - name: \"RSPB\" \n department: \"Centre for Conservation Science\"\n - name: \"Jennifer Bradham\"\n orcid: \"\"\n affiliation:\n - name: \"Wofford College\"\n department: \"Environmental Studies\"\n - name: \"Jack A Brand\" \n orcid: \"0000-0003-3312-941X\" \n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Wildlife, Fish and Environmental Studies\"\n - name: \"Martin I. Brengdahl\" \n orcid: \"0000-0002-1052-7274\" \n affiliation: \n - name: \"Linköping University\" \n department: \"IFM Biology\"\n - name: \"Martin Bulla\" \n orcid: \"0000-0002-1328-1927\" \n affiliation: \n - name: \"Czech University of Life Sciences Prague\" \n department: \"Faculty of Environmental Sciences\"\n - name: \"Luc Bussière\" \n orcid: \"0000-0001-8937-8381\" \n affiliation: \n - name: \"University of Gothenburg\" \n department: \"Biological and Environmental Sciences & Gothenburg Global Biodiversity Centre\"\n - name: \"Ettore Camerlenghi\" \n orcid: \"0000-0002-6203-069X\" \n affiliation: \n - name: \"Monash University\" \n department: \"School of Biological Sciences\"\n - name: \"Sara E. 
Campbell\" \n orcid: \"0000-0001-7195-8898\" \n affiliation: \n - name: \"University of Tennessee Knoxville\" \n department: \"Ecology and Evolutionary Biology\"\n - name: \"Leonardo L F Campos\" \n orcid: \"0000-0002-0186-8552\" \n affiliation: \n - name: \"Universidade Federal de Santa Catarina\" \n department: \"Departamento de Ecologia e Zoologia\"\n - name: \"Anthony Caravaggi\" \n orcid: \"0000-0002-1763-8970\" \n affiliation: \n - name: \"University of South Wales\" \n department: \"School of Biological and Forensic Sciences\"\n - name: \"Pedro Cardoso\" \n orcid: \"0000-0001-8119-9960\" \n affiliation: \n - name: \"Faculdade de Ciências, Universidade de Lisboa\" \n department: \"Centre for Ecology, Evolution and Environmental Changes (cE3c) & CHANGE - Global Change and Sustainability Institute\"\n - name: \"Charles J.W. Carroll\" \n affiliation: \n - name: \"Colorado State University\" \n department: \"Forest and Rangeland Stewardship\"\n - name: \"Therese A. Catanach\" \n orcid: \"0000-0003-3850-1196\" \n affiliation: \n - name: \"Academy of Natural Sciences of Drexel University\" \n department: \"Department of Ornithology\"\n - name: \"Xuan Chen\" \n orcid: \"0000-0002-9499-0054\" \n affiliation: \n - name: \"Salisbury University\" \n department: \"Department of Biological Sciences\"\n - name: \"Heung Ying Janet Chik\" \n orcid: \"0000-0003-4646-4444\" \n affiliation: \n - name: \"University of Groningen\" \n department: \"Groningen Institute for Evolutionary Life Sciences\"\n - name: \"Emily Sarah Choy\" \n orcid: \" 0000-0002-4703-4318\" \n affiliation: \n - name: \"McMaster University\" \n department: \"Department of Biology\"\n - name: \"Alec Philip Christie\" \n orcid: \"0000-0002-8465-8410\" \n affiliation: \n - name: \"University of Cambridge \" \n department: \"Department of Zoology\"\n - name: \"Angela Chuang\" \n orcid: \"0000-0001-6847-5115\" \n affiliation: \n - name: \"University of Florida\" \n department: \"Entomology and Nematology\"\n - 
name: \"Amanda J. Chunco\" \n orcid: \"0000-0002-8265-327X\" \n affiliation: \n - name: \"Elon University\" \n department: \"Environmental Studies\"\n - name: \"Bethany L Clark\" \n orcid: \"0000-0001-5803-7744\" \n affiliation: \n - name: \"BirdLife International\" \n department: \"\"\n - name: \"Andrea Contina\"\n orcid: \"0000-0002-0484-6711\"\n affiliation:\n - name: \"The University of Texas Rio Grande Valley\"\n department: \"School of Integrative Biological and Chemical Sciences\"\n - name: \"Garth A Covernton\"\n orcid: \"0000-0003-3814-4918\"\n affiliation:\n - name: \"University of Toronto\"\n department: \"Department of Ecology and Evolutionary Biology\"\n - name: \"Murray P. Cox\" \n orcid: \"0000-0003-1936-0236\" \n affiliation: \n - name: \"University of Auckland\" \n department: \"Department of Statistics\"\n - name: \"Kimberly A. Cressman\" \n affiliation: \n - name: \"Catbird Stats, LLC\" \n department: \"\"\n - name: \"Marco Crotti\"\n orcid: \"0000-0002-8619-7988\"\n affiliation:\n - name: \"University of Glasgow\"\n department: \"School of Biodiversity, One Health & Veterinary Medicine\"\n - name: \"Connor Davidson Crouch\" \n orcid: \"0000-0003-0353-5820\" \n affiliation: \n - name: \"Northern Arizona University\" \n department: \"School of Forestry\"\n - name: \"Pietro B. 
D'Amelio\" \n orcid: \"0000-0002-4095-6088\" \n affiliation: \n - name: \"Max Planck Institute for Biological Intelligence\" \n department: \"Department of Behavioural Neurobiology\"\n - name: \"Alexandra Allison de Sousa\" \n orcid: \"0000-0003-2379-3894\" \n affiliation: \n - name: \"Bath Spa University\" \n department: \"School of Sciences: Center for Health and Cognition\"\n - name: \"Timm Fabian Döbert\" \n orcid: \"0000-0002-1601-8665\" \n affiliation: \n - name: \"University of Alberta\" \n department: \"Department of Biological Sciences\"\n - name: \"Ralph Dobler\" \n affiliation: \n - name: \"TU Dresden\" \n department: \"Applied Zoology\"\n - name: \"Adam J Dobson\" \n orcid: \"0000-0003-1541-927X\" \n affiliation: \n - name: \"University of Glasgow\" \n department: \"School of Molecular Biosciences, College of Medical Veterinary & Life Sciences,\"\n - name: \"Tim S. Doherty\" \n affiliation: \n - name: \"The University of Sydney\" \n department: \"School of Life and Environmental Sciences\"\n - name: \"Szymon Marian Drobniak\" \n orcid: \"0000-0001-8101-6247\" \n affiliation: \n - name: \"Jagiellonian University\" \n department: \"Institute of Environmental Sciences\"\n - name: \"Alexandra Grace Duffy\" \n orcid: \"0000-0002-7069-5384\" \n affiliation: \n - name: \"Brigham Young University\" \n department: \"Biology Department\"\n - name: \"Alison B. Duncan\"\n orcid: \"0000-0002-6499-2913\"\n affiliation:\n - name: \"University of Montpellier, CNRS, IRD.\"\n department: \"Institute of Evolutionary Sciences Montpellier,\"\n - name: \"Robert P. 
Dunn\" \n orcid: \"0000-0002-6356-4458\" \n affiliation: \n - name: \"University of South Carolina\" \n department: \"Baruch Marine Field Laboratory\"\n - name: \"Jamie Dunning\" \n affiliation: \n - name: \"Imperial College London\" \n department: \"Department of Life Sciences\"\n - name: \"Trishna Dutta\"\n orcid: \"0000-0002-5236-2658\"\n affiliation:\n - name: \"European Forest Institute\"\n department: \"\"\n - name: \"Luke Eberhart-Hertel\" \n orcid: \"0000-0001-7311-6088\" \n affiliation: \n - name: \"Max Planck Institute for Biological Intelligence\" \n department: \"Department of Ornithology\"\n - name: \"Jared Alan Elmore\" \n orcid: \"0000-0002-5980-1561\" \n affiliation: \n - name: \"Clemson University\" \n department: \"Forestry and Environmental Conservation, National Bobwhite and Grassland Initiative\"\n - name: \"Mahmoud Medhat Elsherif\" \n orcid: \"0000-0002-0540-3998\" \n affiliation: \n - name: \"University of Birmingham, Baily Thomas Grant\" \n department: \"Department of Psychology and Vision Science\"\n - name: \"Holly M English\" \n orcid: \"0000-0002-8854-6707\" \n affiliation: \n - name: \"University College Dublin\" \n department: \"School of Biology and Environmental Science\"\n - name: \"David C. Ensminger\" \n orcid: \"0000-0001-5554-1638\" \n affiliation: \n - name: \"San José State University\" \n department: \"Department of Biological Sciences\"\n - name: \"Ulrich Rainer Ernst\" \n orcid: \"0000-0002-6330-5341\" \n affiliation: \n - name: \"University of Hohenheim\" \n department: \"Apicultural State Institute\"\n - name: \"Stephen M. Ferguson\" \n orcid: \"0000-0003-1577-2727\" \n affiliation: \n - name: \"St. 
Norbert College\" \n department: \"Department of Biology\"\n - name: \"Esteban Fernandez-Juricic\"\n orcid: \"0000-0001-5290-8078\"\n affiliation:\n - name: \"Purdue University\"\n department: \"Department of Biological Sciences\"\n - name: \"Thalita Ferreira-Arruda Ferreira-Arruda\" \n orcid: \"0000-0003-1385-0226\" \n affiliation: \n - name: \"University of Göttingen\"\n department: \"Biodiversity, Macroecology & Biogeography, Faculty of Forest Sciences and Forest Ecology\"\n - name: \"John Fieberg\" \n orcid: \"0000-0002-3180-7021\" \n affiliation: \n - name: \"University of Minnesota\" \n department: \"Department of Fisheries, Wildlife, and Conservation Biology\"\n - name: \"Elizabeth A Finch\" \n orcid: \"0000-0002-7031-5708\" \n affiliation: \n - name: \"CABI\" \n department: \"\"\n - name: \"Evan A. Fiorenza\" \n orcid: \"0000-0002-5421-0148\" \n affiliation: \n - name: \"University of California, Irvine\" \n department: \"Department of Ecology and Evolutionary Biology, School of Biological Sciences\"\n - name: \"David N Fisher\" \n orcid: \"0000-0002-4444-4450\" \n affiliation: \n - name: \"University of Aberdeen\" \n department: \"School of Biological Sciences\"\n - name: \"Amélie Fontaine\"\n orcid: \"\"\n affiliation:\n - name: \"McGill University\"\n department: \"Department of Natural Resource Sciences\"\n - name: \"Wolfgang Forstmeier\" \n orcid: \"0000-0002-5984-8925\" \n affiliation: \n - name: \"Max Planck Institute for Biological Intelligence\" \n department: \"Department of Ornithology\"\n - name: \"Yoan Fourcade\" \n orcid: \"0000-0003-3820-946X\" \n affiliation: \n - name: \"Univ. Paris-Est Creteil\" \n department: \"Institute of Ecology and Environmental Sciences (iEES)\"\n - name: \"Graham S. Frank\" \n orcid: \"0000-0002-0151-3807\" \n affiliation: \n - name: \"Oregon State University\" \n department: \"Department of Forest Ecosystems and Society\"\n - name: \"Cathryn A. 
Freund\" \n orcid: \"0000-0002-1570-5519\" \n affiliation: \n - name: \"Wake Forest University\" \n department: \"\"\n - name: \"Eduardo Fuentes-Lillo\"\n orcid: \"0000-0001-5657-954X\"\n affiliation:\n - name: \"Instituto de Ecología y Biodiversidad\"\n department: \"Laboratorio de Invasiones Biológicas (LIB)\"\n - name: \"Sara L. Gandy\" \n orcid: \"0000-0003-2579-4479\" \n affiliation: \n - name: \"University of Glasgow\" \n department: \"Institute for Biodiversity, Animal Health and Comparative Medicine\"\n - name: \"Dustin G. Gannon\" \n orcid: \"0000-0002-6936-8626\" \n affiliation: \n - name: \"Oregon State University\" \n department: \"Department of Forest Ecosystems and Society, College of Forestry\"\n - name: \"Ana I. García-Cervigón\" \n orcid: \"0000-0001-6651-2445\" \n affiliation: \n - name: \"Rey Juan Carlos University\" \n department: \"Biodiversity and Conservation Area\"\n - name: \"Alexis C. Garretson\"\n orcid: \"0000-0002-7260-0131\"\n affiliation:\n - name: \"Tufts University\"\n department: \"Graduate School of Biomedical Sciences\"\n - name: \"Xuezhen Ge\"\n orcid: \"0000-0002-5527-6720\"\n affiliation:\n - name: \"University of Guelph\"\n department: \"Department of Integrative Biology\"\n - name: \"William L. 
Geary\"\n orcid: \"0000-0002-6520-689X\"\n affiliation:\n - name: \"Deakin University\"\n department: \"School of Life and Environmental Sciences (Burwood Campus)\"\n - name: \"Charly Géron\" \n orcid: \"0000-0001-7912-4708\" \n affiliation: \n - name: \"University of Rennes\" \n department: \"CNRS\"\n - name: \"Marc Gilles\" \n orcid: \"0000-0003-4222-9754\" \n affiliation: \n - name: \"Bielefeld University\" \n department: \"Department of Behavioural Ecology\"\n - name: \"Antje Girndt\" \n orcid: \"0000-0002-9558-1201\" \n affiliation: \n - name: \"Universität Bielefeld\" \n department: \"Fakultät für Biologie, Arbeitsgruppe Evolutionsbiologie\"\n - name: \"Daniel Gliksman\" \n affiliation: \n - name: \"Technische Universität Dresden\" \n department: \"Chair of Meteorology, Institute for Hydrology and Meteorology, Faculty of Environmental Sciences\"\n - name: \"Harrison B Goldspiel\" \n orcid: \"0000-0001-9193-8165\" \n affiliation: \n - name: \"University of Maine\" \n department: \"Department of Wildlife, Fisheries, and Conservation Biology\"\n - name: \"Dylan G. E. Gomes\" \n orcid: \"0000-0002-2642-3728\" \n affiliation: \n - name: \"Boise State University\" \n department: \"Department of Biological Sciences\"\n - name: \"Megan Kate Good\"\n orcid: \"0000-0002-6908-1633\"\t \n affiliation:\t \n - name: \"The University of Melbourne\"\t \n department: \"School of Agriculture, Food and Ecosystem Sciences\"\n - name: \"Sarah C Goslee\" \n orcid: \"0000-0002-5939-3297\" \n affiliation: \n - name: \"USDA Agricultural Research Service\" \n department: \"Pastures Systems and Watershed Management Research Unit\"\n - name: \"J. Stephen Gosnell\" \n orcid: \"0000-0002-2103-2728\" \n affiliation: \n - name: \"Baruch College, City University of New York\" \n department: \"Department of Natural Sciences\"\n - name: \"Eliza M. 
Grames\"\n orcid: \"0000-0003-1743-6815\"\n affiliation:\n - name: \"Binghamton University\"\n department: \"Department of Biological Sciences\"\n - name: \"Paolo Gratton\" \n orcid: \"0000-0001-8464-4062\" \n affiliation: \n - name: \"Università di Roma 'Tor Vergata\"\n department: \"Dipartimento di Biologia\"\n - name: \"Nicholas M. Grebe\" \n orcid: \"0000-0003-1411-065X\" \n affiliation: \n - name: \"University of Michigan\" \n department: \"Department of Anthropology\"\n - name: \"Skye M. Greenler\" \n orcid: \"0000-0002-4454-8970\" \n affiliation: \n - name: \"Oregon State University\" \n department: \"College of Forestry\"\n - name: \"Maaike Griffioen\"\n orcid: \"0000-0002-9311-8811\"\n affiliation:\n - name: \"University of Antwerp\"\n department: \"\"\n - name: \"Daniel M Griffith\" \n orcid: \"0000-0001-7463-4004\" \n affiliation: \n - name: \"Wesleyan University\" \n department: \"Earth & Environmental Sciences\"\n - name: \"Frances J. Griffith\" \n orcid: \"0000-0001-9238-0212\" \n affiliation: \n - name: \"Yale University\" \n department: \"Yale School of Medicine, Department of Psychiatry\"\n - name: \"Jake J. Grossman\" \n orcid: \"0000-0001-6468-8551\" \n affiliation: \n - name: \"St. Olaf College\" \n department: \"Biology Department and Environmental Studies Department\"\n - name: \"Ali Güncan\" \n orcid: \"0000-0003-1765-648X\" \n affiliation: \n - name: \"Ordu Uniersity\" \n department: \"Department of Plant Protection, Faculty of Agriculture\"\n - name: \"Stef Haesen\" \n orcid: \"0000-0002-4491-4213\" \n affiliation: \n - name: \"KU Leuven\" \n department: \"Department of Earth and Environmental Sciences\"\n - name: \"James G. Hagan\" \n orcid: \"0000-0002-7504-3393\" \n affiliation: \n - name: \"University of Gothenburg\" \n department: \"Department of Marine Sciences\"\n - name: \"Heather A. 
Hager\"\n orcid: \"0000-0002-0066-6844\"\n affiliation:\n - name: \"Wilfrid Laurier University\"\n department: \"Department of Biology\"\n - name: \"Jonathan Philo Harris\"\n orcid: \"0000-0003-4428-903X\"\n affiliation:\n - name: \"Iowa State University\"\n department: \"Natural Resource Ecology and Management\"\n - name: \"Natasha Dean Harrison\" \n orcid: \"0000-0001-5779-0187\" \n affiliation: \n - name: \"University of Western Australia\" \n department: \"School of Biological Sciences\"\n - name: \"Sarah Syedia Hasnain\" \n orcid: \"0000-0003-4358-5478\" \n affiliation: \n - name: \"Middle East Technical University\" \n department: \"Department of Biological Sciences\"\n - name: \"Justin Chase Havird\" \n orcid: \"0000-0002-8692-6503\" \n affiliation: \n - name: \"University of Texas at Austin\" \n department: \"Dept. of Integrative Biology\"\n - name: \"Andrew J. Heaton\" \n orcid: \"0000-0002-1916-9979\" \n affiliation: \n - name: \"Grand Bay National Estuarine Research Reserve\" \n department: \"\"\n - name: \"María Laura Herrera-Chaustre\"\n orcid: \"0009-0006-2890-5583\"\n affiliation:\n - name: \"Universidad de los Andes\"\n department: \"\"\n - name: \"Tanner J. Howard\"\n orcid: \"0000-0001-7772-1613\"\n affiliation:\t\n - name: \"\"\n department: \"\"\n - name: \"Bin-Yan Hsu\" \n orcid: \"0000-0002-3799-0509\" \n affiliation: \n - name: \"University of Turku\" \n department: \"Department of Biology\"\n - name: \"Fabiola Iannarilli\"\n orcid: \"0000-0002-7018-3557\"\n affiliation:\n - name: \"University of Minnesota\"\n department: \"Dept of Fisheries, Wildlife and Conservation Biology\"\n - name: \"Esperanza C. Iranzo\" \n orcid: \"0000-0001-9411-8437\" \n affiliation: \n - name: \"Universidad Austral de Chile\" \n department: \"Instituto de Ciencia Animal. Facultad de Ciencias Veterinarias\"\n - name: \"Erik N. K. 
Iverson\" \n orcid: \"0000-0002-3756-9511\" \n affiliation: \n - name: \"The University of Texas at Austin\" \n department: \"Department of Integrative Biology\"\n - name: \"Saheed Olaide Jimoh\" \n orcid: \"0000-0002-3238-9079 \" \n affiliation: \n - name: \"University of Wyoming\" \n department: \"Department of Botany\"\n - name: \"Douglas H. Johnson\" \n orcid: \"0000-0002-7778-6641\" \n affiliation: \n - name: \"University of Minnesota\" \n department: \"Department of Fisheries, Wildlife, and Conservation Biology, University of Minnesota\"\n - name: \"Martin Johnsson\" \n orcid: \"0000-0003-1262-4585\" \n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Animal Breeding and Genetics\"\n - name: \"Jesse Jorna\" \n affiliation: \n - name: \"Brigham Young University\" \n department: \"Department of Biology\"\n - name: \"Tommaso Jucker\" \n orcid: \"0000-0002-0751-6312\" \n affiliation: \n - name: \"University of Bristol\" \n department: \"School of Biological Sciences\"\n - name: \"Martin Jung\" \n orcid: \"0000-0002-7569-1390\" \n affiliation: \n - name: \"International Institute for Applied Systems Analysis (IIASA)\" \n department: \"\"\n - name: \"Ineta Kačergytė\" \n orcid: \"0000-0003-4756-8253\"\n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Ecology\"\n - name: \"Oliver Kaltz\"\n orcid: \"\"\n affiliation:\n - name: \"Université de Montpellier\"\n department: \"\"\n - name: \"Alison Ke\" \n orcid: \"0000-0001-9111-449X\" \n affiliation: \n - name: \"University of California, Davis\" \n department: \"Department of Wildlife, Fish, and Conservation Biology\"\n - name: \"Clint D. 
Kelly\" \n orcid: \"0000-0002-0693-7211\" \n affiliation: \n - name: \"Université du Québec à Montréal\" \n department: \"Département des Sciences biologiques\"\n - name: \"Katharine Keogan\" \n orcid: \"0000-0002-1801-7412\" \n affiliation: \n - name: \"University of Edinburgh\" \n department: \"Institute of Evolutionary Biology\"\n - name: \"Friedrich Wolfgang Keppeler\" \n orcid: \"0000-0002-5165-1298\" \n affiliation: \n - name: \"Center for Limnology, University of Wisconsin - Madison\" \n department: \"Center for Limnology\"\n - name: \"Alexander K. Killion\" \n orcid: \"0000-0003-1449-8295\" \n affiliation: \n - name: \"Yale University\" \n department: \"Center for Biodiversity and Global Change\"\n - name: \"Dongmin Kim\" \n orcid: \"0000-0002-1508-1590\" \n affiliation: \n - name: \"University of Minnesota, St. Paul\" \n department: \"Department of Ecology, Evolution, and Behavior\"\n - name: \"David P Kochan\" \n orcid: \"0000-0002-3643-3516\" \n affiliation: \n - name: \"Florida International University\" \n department: \"Institute of Environment and Department of Biological Sciences\"\n - name: \"Peter Korsten\" \n orcid: \"0000-0003-0814-9099\" \n affiliation: \n - name: \"Aberystwyth University\" \n department: \"Department of Life Sciences\"\n - name: \"Shan Kothari\" \n orcid: \"0000-0001-9445-5548\" \n affiliation: \n - name: \"Université de Montréal\" \n department: \"Institut de recherche en biologie végétale\"\n - name: \"Jonas Kuppler\" \n orcid: \"0000-0003-4409-9367\" \n affiliation: \n - name: \"Ulm University\" \n department: \"Institute of Evolutionary Ecology and Conservation Genomics\"\n - name: \"Jillian M Kusch\" \n orcid: \"0000-0003-0078-5621\" \n affiliation: \n - name: \"Memorial University of Newfoundland\" \n department: \"Department of Biology\"\n - name: \"Malgorzata Lagisz\" \n orcid: \"0000-0002-3993-6127\" \n affiliation: \n - name: \"University of New South Wales\" \n department: \"Evolution & Ecology Research Centre and 
School of Biological, Earth & Environmental Sciences\"\n - name: \"Kristen Marianne Lalla\"\n orcid: \"0000-0003-1422-0672\"\n affiliation:\n - name: \"McGill University\"\n department: \"Department of Natural Resource Sciences\"\n - name: \"Daniel J. Larkin\" \n orcid: \"0000-0001-6378-0495\" \n affiliation: \n - name: \"University of Minnesota-Twin Cities\"\n department: \"Department of Fisheries, Wildlife and Conservation Biology\"\n - name: \"Courtney L. Larson\" \n orcid: \"0000-0003-3878-259X\" \n affiliation: \n - name: \"The Nature Conservancy\" \n department: \"\"\n - name: \"Katherine S. Lauck\" \n orcid: \"0000-0003-3303-5050\" \n affiliation: \n - name: \"University of California, Davis\" \n department: \"Department of Wildlife, Fish, and Conservation Biology\"\n - name: \"M. Elise Lauterbur\" \n orcid: \"0000-0002-7362-3618\" \n affiliation: \n - name: \"University of Arizona\" \n department: \"Ecology and Evolutionary Biology\"\n - name: \"Alan Law\" \n orcid: \"0000-0001-5971-3214\" \n affiliation: \n - name: \"University of Stirling\" \n department: \"Biolgical and Environmental Sciences\"\n - name: \"Don-Jean Léandri-Breton\"\n orcid: \"0000-0003-0547-2966\" \n affiliation: \n - name: \"McGill University\"\n department: \"Department of Natural Resource Sciences\"\n - name: \"Jonas J. Lembrechts\"\n orcid: \"0000-0002-1933-0750\"\n affiliation:\n - name: \"University of Antwerp\"\n department: \"Department of Biology\"\n - name: \"Kiara L'Herpiniere\"\n orcid: \"0000-0003-0322-1266\"\n affiliation:\n - name: \"Macquarie University\"\n department: \"Natural sciences\"\n - name: \"Eva J. P. 
Lievens\"\n orcid: \"0000-0003-3280-0072\"\n affiliation: \n - name: \"University of Konstanz\"\n department: \"Aquatic Ecology and Evolution Group, Limnological Institute\"\n - name: \"Daniela Oliveira de Lima\" \n orcid: \"0000-0001-6650-2570\" \n affiliation: \n - name: \"Universidade Federal da Fronteira Sul\" \n department: \"Campus Cerro Largo\"\n - name: \"Shane Lindsay\" \n affiliation: \n - name: \"University of Hull\"\n department: \"School of Psychology and Social Work\"\n - name: \"Martin Luquet\"\n orcid: \"0000-0002-4656-4923\"\n affiliation:\n - name: \"Université de Pau et des Pays de l′Adour\"\n department: \"UMR 1224 ECOBIOP\"\n - name: \"Ross MacLeod\"\n orcid: \"0000-0001-5508-0202\"\n affiliation:\n - name: \"Liverpool John Moores University\"\n department: \"School of Biological & Environmental Sciences\"\n - name: \"Kirsty H. Macphie\"\n orcid: \"0000-0002-9824-4833\" \n affiliation: \n - name: \"University of Edinburgh\"\n department: \"Institute of Ecology and Evolution\"\n - name: \"Kit Magellan\"\n orcid: \"\"\n affiliation:\n - name: \"\"\n department: \"\"\n - name: \"Magdalena M. Mair\" \n orcid: \"0000-0003-0074-6067\" \n affiliation: \n - name: \"Bayreuth Center of Ecology and Environmental Research (BayCEER), University of Bayreuth\" \n department: \"Statistical Ecotoxicology\"\n - name: \"Lisa E. Malm\"\n orcid: \"0000-0002-7412-9515\"\n affiliation: \n - name: \"Umeå University\"\n department: \"Ecology and Environmental Science\"\n - name: \"Stefano Mammola\"\n orcid: \"0000-0002-4471-9055\"\n affiliation: \n - name: \"National Research Council of Italy (CNR)\"\n department: \"Molecular Ecology Group (MEG), Water Research Institute (IRSA)\"\n - name: \"Caitlin P. 
Mandeville\"\n orcid: \"0000-0002-1361-607X\"\n affiliation:\n - name: \"Norwegian University of Science and Technology\"\n department: \"Department of Natural History\"\n - name: \"Michael Manhart\"\n orcid: \"0000-0003-3791-9056\"\n affiliation: \n - name: \"Rutgers University Robert Wood Johnson Medical School\"\n department: \"Center for Advanced Biotechnology and Medicine\"\n - name: \"Laura Milena Manrique-Garzon\"\n orcid: \"0009-0004-4671-6968\"\n affiliation:\n - name: \"Universidad de los Andes\"\n department: \"Departamento de Ciencias Biológicas\"\n - name: \"Elina Mäntylä\" \n orcid: \"0000-0002-2267-7114\"\n affiliation: \n - name: \"University of Turku\"\n department: \"Department of Biology\"\n - name: \"Philippe Marchand\" \n orcid: \"0000-0001-6717-0475\" \n affiliation: \n - name: \"Université du Québec en Abitibi-Témiscamingue\"\n department: \"Institut de recherche sur les forêts\"\n - name: \"Benjamin Michael Marshall\" \n orcid: \"0000-0001-9554-0605\" \n affiliation: \n - name: \"University of Stirling\"\n department: \"Biological and Environmental Sciences\"\n - name: \"Dominic Andreas Martin\"\n orcid: \"0000-0001-7197-2278\"\n affiliation: \n - name: \"University of Bern\"\n department: \"Institute of Plant Sciences\"\n - name: \"Jake Mitchell Martin\"\n orcid: \"0000-0001-9544-9094\"\n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Wildlife, Fish, and Environmental Studies\"\n - name: \"Charles A. Martin\"\n orcid: \"0000-0003-3185-4634\"\n affiliation: \n - name: \"Université du Québec à Trois-Rivières\"\n department: \"\"\n - name: \"April Robin Martinig\" \n orcid: \"0000-0002-0972-6903\"\n affiliation: \n - name: \"University of New South Wales\"\n department: \"School of Biological, Earth and Environmental Sciences\"\n - name: \"Erin S. 
McCallum\"\n orcid: \"0000-0001-5426-9652\"\n affiliation: \n - name: \"Swedish University of Agricultural Sciences\"\n department: \"Department of Wildlife, Fish and Environmental Studies\"\n - name: \"Mark McCauley\"\n orcid: \"0000-0001-5347-6860\"\n affiliation:\n - name: \"University of Florida\"\n department: \"Whitney Laboratory for Marine Bioscience\"\n - name: \"Sabrina M. McNew\" \n orcid: \"0000-0002-1345-1674\" \n affiliation: \n - name: \"University of Arizona\"\n department: \"Ecology and Evolutionary Biology\"\n - name: \"Scott J. Meiners\" \n orcid: \"0000-0003-1805-398X\" \n affiliation: \n - name: \"Eastern Illinois University\"\n department: \"Biological Sciences\"\n - name: \"Thomas Merkling\"\n orcid: \"0000-0002-5878-0359\"\n affiliation:\n - name: \"Université de Lorraine, Inserm1433 CIC-P CHRU de Nancy\"\n department: \"Centre d'Investigations Clinique Plurithématique - Institut Lorrain du Coeur et des Vaisseaux\"\n - name: \"Marcus Michelangeli\" \n orcid: \"0000-0002-0053-6759\"\n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Wildlife, Fish and Environmental Studies\"\n - name: \"Maria Moiron\" \n orcid: \"0000-0003-0991-1460\" \n affiliation: \n - name: \"Bielefeld University\" \n department: \"Evolutionary biology department\"\n - name: \"Bruno Moreira\" \n orcid: \"0000-0002-7319-2555\" \n affiliation: \n - name: \"Centro de Investigaciones sobre Desertificación, Consejo Superior de Investigaciones Cientificas (CIDE-CSIC/UV/GV)\" \n department: \"Department of Ecology and global change\"\n - name: \"Jennifer Mortensen\" \n affiliation: \n - name: \"University of Arkansas\" \n department: \"Department of Biological Sciences\"\n - name: \"Benjamin Mos\" \n orcid: \"0000-0003-3687-516X\" \n affiliation: \n - name: \"The University of Queensland\" \n department: \"School of the Environment, Faculty of Science\"\n - name: \"Taofeek Olatunbosun Muraina\" \n orcid: 
\"0000-0003-2646-2732\" \n affiliation: \n - name: \"Oyo State College of Agriculture and Technology\" \n department: \"Department of Animal Health and Production\"\n - name: \"Penelope Wrenn Murphy\"\n orcid: \"0000-0002-9989-1696\"\n affiliation:\n - name: \"University of Wisconsin-Madison\"\n department: \"Department of Forest & Wildlife Ecology\"\n - name: \"Luca Nelli\"\n orcid: \"0000-0001-6091-4072\" \n affiliation: \n - name: \"University of Glasgow\" \n department: \"School of Biodiversity, One Health and Veterinary Medicine\"\n - name: \"Petri Niemelä\"\n orcid: \"\"\n affiliation:\n - name: \"University of Helsinki\"\n department: \"Organismal and Evolutionary Biology Research Programme, Faculty of Biological and Environmental Sciences\"\n - name: \"Josh Nightingale\"\n orcid: \"0000-0002-1188-7773\"\n affiliation:\n - name: \"University of Iceland\"\n department: \"South Iceland Research Centre\"\n - name: \"Gustav Nilsonne\"\n orcid: \"0000-0001-5273-0150\" \n affiliation: \n - name: \"Karolinska Institutet\"\n department: \"Department of Clinical Neuroscience\"\n - name: \"Sergio Nolazco\"\n orcid: \"0000-0003-2625-9283\"\n affiliation: \n - name: \"Monash University\"\n department: \"School of Biological Sciences\"\n - name: \"Sabine S. Nooten\"\n orcid: \"0000-0002-1798-315X\"\n affiliation: \n - name: \"University of Würzburg\"\n department: \"Animal Ecology and Tropical Biology\"\n - name: \"Jessie Lanterman Novotny\" \n orcid: \"0000-0001-5079-4070\"\n affiliation: \n - name: \"Hiram College\"\n department: \"Biology\"\n - name: \"Agnes Birgitta Olin\"\n orcid: \"0000-0002-8508-3911\"\n affiliation: \n - name: \"Swedish University of Agricultural Sciences\"\n department: \"Department of Aquatic Resources\"\n - name: \"Chris L. 
Organ\" \n affiliation: \n - name: \"Montana State University\" \n department: \"Department of Earth Sciences\"\n - name: \"Kate L Ostevik\" \n orcid: \"0000-0002-2197-9284\" \n affiliation: \n - name: \"University of California Riverside\"\n department: \"Department of Evolution, Ecology, and Organismal Biology\"\n - name: \"Facundo Xavier Palacio\"\n orcid: \"0000-0002-6536-1400\"\n affiliation: \n - name: \"Universidad Nacional de La Plata\" \n department: \"Sección Ornitología\"\n - name: \"Matthieu Paquet\" \n orcid: \"0000-0003-1182-2299\" \n affiliation: \n - name: \"Swedish University of Agricultural Sciences\"\n department: \"Department of Ecology\"\n - name: \"Darren James Parker\"\n orcid: \"0000-0003-4027-7779\"\n affiliation:\n - name: \"Bangor University\"\n department: \"\"\n - name: \"David J Pascall\" \n orcid: \"0000-0002-7543-0860\" \n affiliation: \n - name: \"University of Cambridge\" \n department: \"MRC Biostatistics Unit\"\n - name: \"Valerie J. Pasquarella\" \n orcid: \"0000-0003-1258-6195\" \n affiliation: \n - name: \"Harvard University\"\n department: \"Harvard Forest\"\n - name: \"John Harold Paterson\"\n orcid: \"\"\n affiliation:\n - name: \"University of Stirling\"\n department: \"Biological and Environmental Sciences\"\n - name: \"Ana Payo-Payo \" \n orcid: \"0000-0001-5482-242X\"\n affiliation: \n - name: \"Universidad Complutense de Madrid\"\n department: \"Departamento de Biodiversidad, Ecología y Evolución.\"\n - name: \"Karen Marie Pedersen\"\n orcid: \"0000-0003-0460-1420\"\n affiliation: \n - name: \"Technische Universität Darmstadt\"\n department: \"Biology department\"\n - name: \"Grégoire Perez\"\n orcid: \"0000-0001-8861-4856\"\n affiliation: \n - name: \"CIRAD\"\n department: \"UMR 1309 ASTRE, CIRAD – INRAE\"\n - name: \"Kayla I. 
Perry\"\n orcid: \"0000-0001-9903-8057\"\n affiliation: \n - name: \"The Ohio State University\"\n department: \"Department of Entomology\"\n - name: \"Patrice Pottier\" \n orcid: \"0000-0003-2106-6597\"\n affiliation: \n - name: \"The University of New South Wales\" \n department: \"Evolution & Ecology Research Centre, School of Biological, Earth and Environmental Sciences\"\n - name: \"Michael J. Proulx\"\n orcid: \"0000-0003-4066-3645\" \n affiliation: \n - name: \"University of Bath\"\n department: \"Department of Psychology\"\n - name: \"Raphaël Proulx\"\n orcid: \"0000-0002-9188-9225\"\n affiliation: \n - name: \"Université du Québec à Trois-Rivières\"\n department: \"Chaire de recherche en intégrité écologique\"\n - name: \"Jessica L Pruett\" \n affiliation: \n - name: \"University of Southern Mississippi\" \n department: \"Mississippi Based RESTORE Act Center of Excellence\"\n - name: \"Veronarindra Ramananjato\" \n orcid: \"0000-0003-2398-3671\" \n affiliation: \n - name: \"University of California Berkeley\" \n department: \"Department of Integrative Biology\"\n - name: \"Finaritra Tolotra Randimbiarison\" \n affiliation: \n - name: \"Université d'Antananarivo\" \n department: \"Mention Zoologie et Biodiversité Animale\"\n - name: \"Onja H. Razafindratsima\" \n orcid: \"0000-0003-1655-6647\" \n affiliation: \n - name: \"University of California, Berkeley\"\n department: \"Department of Integrative Biology \"\n - name: \"Diana J. 
Rennison\" \n orcid: \"0000-0002-5944-0743\" \n affiliation: \n - name: \"University of California San Diego\" \n department: \"Department of Ecology, Behavior and Evolution\"\n - name: \"Federico Riva\" \n orcid: \"0000-0002-1724-4293\" \n affiliation: \n - name: \"VU Amsterdam\" \n department: \"Institute for Environmental Sciences\"\n - name: \"Sepand Riyahi Riyahi\" \n orcid: \"0000-0003-3317-3576\" \n affiliation: \n - name: \"University of Vienna\" \n department: \"Department of Evolutionary Anthropology\"\n - name: \"Michael James Roast\"\n orcid: \"0000-0002-9454-7562\" \n affiliation: \n - name: \"University of Veterinary Medicine\"\n department: \"Konrad Lorenz Institute for Ethology\"\n - name: \"Felipe Pereira Rocha\" \n orcid: \"0000-0003-3968-1280\" \n affiliation: \n - name: \"The University of Hong Kong\"\n department: \"School of Biological Sciences\"\n - name: \"Dominique G. Roche\"\n orcid: \"0000-0002-3326-864X\"\n affiliation: \n - name: \"Université de Neuchâtel\"\n department: \"Institut de biologie\"\n - name: \"Cristian Román-Palacios\" \n orcid: \"0000-0003-1696-4886\" \n affiliation: \n - name: \"University of Arizona\" \n department: \"School of Information\"\n - name: \"Michael S. Rosenberg\" \n orcid: \"0000-0001-7882-2467\"\n affiliation: \n - name: \"Virginia Commonwealth University\"\n department: \"Center for Biological Data Science\"\n - name: \"Jessica Ross\" \n orcid: \"0000-0003-4124-3116\" \n affiliation: \n - name: \"University of Wisconsin\" \n department: \"\"\n - name: \"Freya E Rowland\" \n orcid: \"0000-0002-1041-5301\" \n affiliation: \n - name: \"Yale University\" \n department: \"School of the Environment\"\n - name: \"Deusdedith Rugemalila\" \n orcid: \"0000-0002-7473-4301\" \n affiliation: \n - name: \"Florida International University\" \n department: \"Institute of the Environment\"\n - name: \"Avery L. 
Russell\" \n orcid: \"0000-0001-8036-2711\" \n affiliation: \n - name: \"Missouri State University\"\n department: \"Department of Biology\"\n - name: \"Suvi Ruuskanen\" \n orcid: \"0000-0001-5582-9455\"\n affiliation: \n - name: \"University of Jyväskylä \" \n department: \"Department of Biological and Environmental Science\"\n - name: \"Patrick Saccone\" \n orcid: \"0000-0001-8820-593X\" \n affiliation: \n - name: \"OeAW (Austrian Academy of Sciences)\" \n department: \"Institute for Interdisciplinary Mountain Research\"\n - name: \"Asaf Sadeh\" \n orcid: \"0000-0002-2704-4033\" \n affiliation: \n - name: \"Newe Ya'ar Research Center, Agricultural Research Organization (Volcani Institute)\" \n department: \"Department of Natural Resources\"\n - name: \"Stephen M Salazar\" \n orcid: \"0000-0002-5437-0280\" \n affiliation: \n - name: \"Bielefeld University\"\n department: \"Department of Animal Behaviour\"\n - name: \"kris sales\" \n orcid: \"0000-0002-7568-2507\" \n affiliation: \n - name: \"Office for National Statistics\" \n department: \"\"\n - name: \"Pablo Salmón\" \n orcid: \"0000-0001-9718-6611\" \n affiliation: \n - name: \"Institute of Avian Research 'Vogelwarte Helgoland'\" \n department: \"\"\n - name: \"Alfredo Sánchez-Tójar\" \n orcid: \"0000-0002-2886-0649\" \n affiliation: \n - name: \"Bielefeld University\" \n department: \"Department of Evolutionary Biology\"\n - name: \"Leticia Pereira Santos\" \n affiliation: \n - name: \"Universidade Federal de Goiás\"\n department: \"Ecology Department\"\n - name: \"Francesca Santostefano\"\n orcid: \"0000-0003-3308-6552\"\n affiliation:\n - name: \"University of Exeter\"\n department: \"University of Exeter\"\n - name: \"Hayden T. 
Schilling\" \n orcid: \"0000-0002-7291-347X\" \n affiliation: \n - name: \"New South Wales Department of Primary Industries Fisheries\" \n department: \"\"\n - name: \"Marcus Schmidt\"\n orcid: \"0000-0002-5546-5521\"\n affiliation: \n - name: \"Leibniz Centre for Agricultural Landscape Research (ZALF)\"\n department: \"Research Data Management\"\n - name: \"Tim Schmoll\" \n orcid: \"0000-0003-3234-7335\"\n affiliation: \n - name: \"Bielefeld University\"\n department: \"Evolutionary Biology\"\n - name: \"Adam C. Schneider\" \n orcid: \"0000-0002-4249-864X\"\n affiliation: \n - name: \"University of Wisconsin-La Crosse\"\n department: \"Biology Department\"\n - name: \"Allie E Schrock\" \n orcid: \"0000-0001-5825-6306\" \n affiliation: \n - name: \"Duke University\"\n department: \"Department of Evolutionary Anthropology\"\n - name: \"Julia Schroeder\" \n orcid: \"0000-0002-4136-843X\"\n affiliation: \n - name: \"Imperial College London\"\n department: \"Department of Life Sciences\"\n - name: \"Nicolas Schtickzelle\"\n orcid: \"0000-0001-7829-5361\" \n affiliation: \n - name: \"UCLouvain\" \n department: \"Earth and Life Institute, Ecology and Biodiversity\"\n - name: \"Nick L. Schultz\" \n orcid: \"0000-0002-6760-9481\" \n affiliation: \n - name: \"Federation University Australia\"\n department: \"Future Regions Research Centre\"\n - name: \"Drew A. 
Scott\"\n orcid: \"0000-0003-0361-9522\"\n affiliation: \n - name: \"USDA - Agricultural Research Service\" \n department: \"Northern Great Plains Research Laboratory\"\n - name: \"Michael Peter Scroggie\"\n orcid: \"0000-0001-9441-6565\"\n affiliation:\n - name: \"Arthur Rylah Institute for Environmental Research\"\n - name: \"Julie Teresa Shapiro\"\n orcid: \"0000-0002-4539-650X\"\n affiliation:\n - name: \"University of Lyon - French Agency for Food, Environmental and Occupational Health and Safety (ANSES)\"\n department: \"Epidemiology and Surveillance Support Unit\"\n - name: \"Nitika Sharma Sharma\" \n orcid: \"0000-0002-7411-5594\" \n affiliation: \n - name: \"University of California Los Angeles\" \n department: \"UCLA Anderson Center for Impact\"\n - name: \"Caroline L Shearer\"\n orcid: \"0000-0001-7886-9302\"\n affiliation: \n - name: \"Duke University\"\n department: \"Department of Evolutionary Anthropology\"\n - name: \"Diego Simón\"\n orcid: \"0000-0002-6317-3991\"\n affiliation:\n - name: \"Universidad de la República\"\n department: \"Facultad de Ciencias\"\n - name: \"Michael I. 
Sitvarin\" \n orcid: \"0000-0002-3080-3619\" \n affiliation: \n - name: \"Independent researcher\" \n department: \"\"\n - name: \"Fabrício Luiz Skupien\"\n orcid: \"0000-0003-1991-7102\"\n affiliation: \n - name: \"Universidade Federal do Rio de Janeiro\"\n department: \"Programa de Pós-Graduação em Ecologia, Instituto de Biologia, Centro de Ciências da Saúde\"\n - name: \"Heather Lea Slinn\" \n affiliation: \n - name: \"Vive Crop Protection\"\n department: \"\"\n - name: \"Jeremy A Smith\" \n orcid: \"0000-0002-4942-8310\" \n affiliation: \n - name: \"British Trust for Ornithology\"\n department: \"\"\n - name: \"Grania Polly Smith\"\n affiliation: \n - name: \"University of Cambridge\"\n department: \"\"\n - name: \"Rahel Sollmann\"\n orcid: \"0000-0002-1607-2039\"\n affiliation: \n - name: \"University of California Davis\"\n department: \"Department of Wildlife, Fish, and Conservation Biology\"\n - name: \"Kaitlin Stack Whitney\" \n orcid: \"0000-0002-0815-5037\" \n affiliation: \n - name: \"Rochester Institute of Technology\"\n department: \"Science, Technology & Society Department\"\n - name: \"Shannon Michael Still\" \n orcid: \"0000-0002-7370-1217\"\n affiliation: \n - name: \"Nomad Ecology\"\n department: \"\"\n - name: \"Erica F. Stuber\"\n orcid: \"0000-0002-2687-6874\"\n affiliation: \n - name: \"Utah State University\"\n department: \"Wildland Resources Department\"\n - name: \"Guy F. 
Sutton\"\n orcid: \"0000-0003-2405-0945\" \n affiliation: \n - name: \"Rhodes University\"\n department: \"Center for Biological Control, Department of Zoology and Entomology\"\n - name: \"Ben Swallow\" \n orcid: \"0000-0002-0227-2160\" \n affiliation: \n - name: \"University of St Andrews\" \n department: \"School of Mathematics and Statistics and Centre for Research in Ecological and Environmental Modelling\"\n - name: \"Conor Claverie Taff\" \n orcid: \"0000-0003-1497-7990\" \n affiliation: \n - name: \"Cornell University\" \n department: \"Department of Ecology and Evolutionary Biology\"\n - name: \"Elina Takola\" \n orcid: \"0000-0003-1268-5513\" \n affiliation: \n - name: \"Helmholtz Centre for Environmental Research – UFZ\" \n department: \"Department of Computational Landscape Ecology\"\n - name: \"Andrew J Tanentzap\" \n orcid: \"0000-0002-2883-1901\" \n affiliation: \n - name: \"Trent University\"\n department: \"Ecosystems and Global Change Group, School of the Environment\"\n - name: \"Rocío Tarjuelo\"\n orcid: \"0000-0002-0638-1911\"\n affiliation:\n - name: \"Universidad de Valladolid\"\n department: \"Instituto Universitario de Investigación en Gestión Forestal Sostenible (iuFOR)\"\n - name: \"Richard J. Telford\"\n orcid: \"0000-0001-9826-3076\"\n affiliation:\n - name: \"University of Bergen\"\n department: \"Department of Biological Sciences\"\n - name: \"Christopher J. 
Thawley\" \n orcid: \"0000-0002-6040-2613\" \n affiliation: \n - name: \"University of Rhode Island\" \n department: \"Department of Biological Science\"\n - name: \"Hugo Thierry\"\n orcid: \"\"\n affiliation:\n - name: \"McGill University\"\n department: \"Department of Geography\"\n - name: \"Jacqueline Thomson\"\n orcid: \"\"\n affiliation:\n - name: \"University of Guelph\"\n department: \"Integrative Biology\"\n - name: \"Svenja Tidau\"\n orcid: \"0000-0003-0336-0450\"\n affiliation:\n - name: \"University of Plymouth\"\n department: \"School of Biological and Marine Sciences\"\n - name: \"Emily M. Tompkins\"\n orcid: \"0000-0002-1383-2039\"\n affiliation:\n - name: \"Wake Forest University\"\n department: \"Biology Department\"\n - name: \"Claire Marie Tortorelli\" \n orcid: \"0000-0001-9493-9817\" \n affiliation: \n - name: \"University of California, Davis\"\n department: \"Plant Sciences\"\n - name: \"Andrew Trlica\"\n orcid: \"0000-0001-7692-323X\"\n affiliation: \n - name: \"North Carolina State University\"\n department: \"College of Natural Resources\"\n - name: \"Biz R Turnell\" \n orcid: \"0000-0002-1068-304X\" \n affiliation: \n - name: \"Technische Universität Dresden\" \n department: \"Institute of Zoology\"\n - name: \"Lara Urban\"\n orcid: \"0000-0002-5445-9314\"\n affiliation: \n - name: \"Helmholtz Zentrum Muenchen\"\n department: \"Helmholtz AI\"\n - name: \"Jessica Eva Megan van der Wal\"\n orcid: \"0000-0002-6441-3598\"\n affiliation:\n - name: \"University of Cape Town\"\n department: \"FitzPatrick Institute of African Ornithology\"\n - name: \"Jens Van Eeckhoven\"\n orcid: \"0000-0001-8407-4290\"\n affiliation:\n - name: \"University College London\"\n department: \"Department of Cell & Developmental Biology, Division of Biosciences\"\n - name: \"Stijn Van de Vondel\" \n orcid: \"0000-0002-0223-7330\" \n affiliation: \n - name: \"University of Antwerp\"\n department: \"Department of Biology\"\n - name: \"Francis van Oordt\" \n orcid: 
\"0000-0002-8471-235X\" \n affiliation: \n - name: \"McGill University\" \n department: \"Natural Resource Sciences\"\n - name: \"Mark C. Vanderwel\"\n affiliation: \n - name: \"University of Regina\" \n department: \"Department of Biology\"\n - name: \"K. Michelle Vanderwel\" \n affiliation: \n - name: \"University of Saskatchewan\" \n department: \"Biology\"\n - name: \"Karen J Vanderwolf\" \n orcid: \"0000-0003-0963-3093\" \n affiliation: \n - name: \"University of Waterloo\" \n department: \"Biology\"\n - name: \"Juliana Vélez\"\n orcid: \"0000-0003-0412-2761\"\n affiliation:\n - name: \"University of Minnesota\"\n department: \"Department of Fisheries, Wildlife and Conservation Biology\"\n - name: \"Diana Carolina Vergara-Florez\"\n orcid: \"0000-0001-9349-4027\"\n affiliation:\n - name: \"University of Michigan\"\n department: \"Department of Ecology & Evolutionary Biology\"\n - name: \"Brian C. Verrelli\" \n orcid: \"0000-0002-9670-4920\" \n affiliation: \n - name: \"Virginia Commonwealth University\" \n department: \"Center for Biological Data Science\"\n - name: \"Nora Villamil\" \n orcid: \"0000-0002-6957-2248\" \n affiliation: \n - name: \"Public Health Scotland\" \n department: \"Lothian Analytical Services\"\n - name: \"Marcus Vinícius Vieira\"\n orcid: \"0000-0002-4472-5447\" \n affiliation: \n - name: \"Universidade Federal do Rio de Janeiro\" \n department: \"Dept. 
Ecologia, Instituto de Biologia\"\n - name: \"Nora Villamil\"\n orcid: \"0000-0002-6957-2248\"\n affiliation:\n - name: \"Public Health Scotland\"\n department: \"Lothian Analytical Services\"\n - name: \"Valerio Vitali\"\t \n orcid: \"0000-0003-3593-1510\"\n affiliation:\n - name: \"University of Muenster\"\n department: \"Institute for Evolution and Biodiversity\"\n - name: \"Julien Vollering\" \n orcid: \"0000-0002-7409-2898\" \n affiliation: \n - name: \"Western Norway University of Applied Sciences \" \n department: \"Department of Environmental Sciences\"\n - name: \"Jeffrey Walker\"\n orcid: \"0000-0003-2864-7036\"\n affiliation:\n - name: \"University of Southern Maine\"\n department: \"Department of Biological Sciences\"\n - name: \"Xanthe J Walker\" \n orcid: \"0000-0002-2448-691X\" \n affiliation: \n - name: \"Northern Arizona University\" \n department: \"Center for Ecosystem Science and Society\"\n - name: \"Jonathan A. Walter\" \n orcid: \"0000-0003-2983-751X\" \n affiliation: \n - name: \"University of California Davis\"\n department: \"Center for Watershed Sciences\"\n - name: \"Pawel Waryszak\" \n orcid: \"0000-0002-4245-3150\" \n affiliation: \n - name: \"University of Southern Queensland\"\n department: \"School of Agriculture and Environmental Science\"\n - name: \"Ryan J. Weaver\" \n orcid: \"0000-0002-6160-4735\" \n affiliation: \n - name: \"Iowa State University\" \n department: \"Department of Ecology, Evolution, and Organismal Biology\"\n - name: \"Ronja E. M. Wedegärtner\"\n orcid: \"0000-0003-4633-755X\"\n affiliation:\n - name: \"Fram Project AS\"\n department: \"\"\n - name: \"Daniel L. 
Weller\" \n orcid: \"0000-0001-7259-6331\" \n affiliation: \n - name: \"Virginia Polytechnic Institute and State University\" \n department: \"Department of Food Science & Technology\"\n - name: \"Shannon Whelan\" \n orcid: \"0000-0003-2862-327X\" \n affiliation: \n - name: \"McGill University\" \n department: \"Department of Natural Resource Sciences\"\n - name: \"Rachel Louise White\" \n orcid: \"0000-0003-0694-7847\" \n affiliation: \n - name: \"University of Brighton\" \n department: \"School of Applied Sciences\"\n - name: \"David William Wolfson\" \n orcid: \"0000-0003-1098-9206\" \n affiliation: \n - name: \"University of Minnesota\" \n department: \"Department of Fisheries, Wildlife and Conservation Biology\"\n - name: \"Andrew Wood\" \n orcid: \"0000-0001-6863-0824\" \n affiliation: \n - name: \"University of Oxford\" \n department: \"Department of Biology\"\n - name: \"Scott W. Yanco\"\n orcid: \"0000-0003-4717-9370\" \n affiliation: \n - name: \"University of Colorado Denver\" \n department: \"Department of Integrative Biology\"\n - name: \"Jian D. L. Yen\" \n orcid: \"0000-0001-7964-923X\" \n affiliation: \n - name: \"Arthur Rylah Institute for Environmental Research\" \n department: \"\"\n - name: \"Casey Youngflesh\" \n orcid: \"0000-0001-6343-3311\" \n affiliation: \n - name: \"Michigan State University\" \n department: \"Ecology, Evolution, and Behavior Program\"\n - name: \"Giacomo Zilio\" \n orcid: \"0000-0002-4448-3118\" \n affiliation: \n - name: \"University of Montpellier, CNRS\" \n department: \"ISEM\"\n - name: \"Cédric Zimmer\" \n orcid: \"0000-0001-8160-2836\" \n affiliation: \n - name: \"Université Sorbonne Paris Nord\"\n department: \"Laboratoire d’Ethologie Expérimentale et Comparée, LEEC, UR4443\"\n - name: \"Gregory Mark Zimmerman\"\n orcid: \"\"\n affiliation:\n - name: \"Lake Superior State University\"\n department: \"Department of Science and Environment\"\n - name: \"Rachel A. 
Zitomer\" \n orcid: \"0000-0002-1888-1817\"\n affiliation: \n - name: \"Oregon State University\" \n department: \"Department of Forest Ecosystems and Society\"\ncitation:\n type: article-journal\n container-title: \"BMC Biology\"\n issued: \"Date\"\n volume: \"\"\n doi: \"\"\n url: \"\"\n author:\n - name: \"Elliot Gould\"\n - name: \"Hannah S. Fraser\"\n - name: \"Timothy H. Parker\"\n - name: \"Shinichi Nakagawa\"\n - name: \"Simon C. Griffith\"\n - name: \"Peter A. Vesk\"\n - name: \"Fiona Fidler\"\n - name: \"Daniel G. Hamilton\"\n - name: \"Robin N. Abbey-Lee\"\n - name: \"Jessica K. Abbott\"\n - name: \"Luis A. Aguirre\"\n - name: \"Carles Alcaraz\"\n - name: \"Irith Aloni\"\n - name: \"Drew Altschul\"\n - name: \"Kunal Arekar\"\n - name: \"Jeff W. Atkins\"\n - name: \"Joe Atkinson\"\n - name: \"Christopher M. Baker\"\n - name: \"Meghan Barrett\"\n - name: \"Kristian Bell\"\n - name: \"Suleiman Kehinde Bello\"\n - name: \"Iván Beltrán\"\n - name: \"Bernd J. Berauer\"\n - name: \"Michael Grant Bertram\"\n - name: \"Peter D. Billman\"\n - name: \"Charlie K. Blake\"\n - name: \"Shannon Blake\"\n - name: \"Louis Bliard\"\n - name: \"Andrea Bonisoli-Alquati\"\n - name: \"Timothée Bonnet\"\n - name: \"Camille Nina Marion Bordes\"\n - name: \"Aneesh P. H. Bose\"\n - name: \"Thomas Botterill-James\"\n - name: \"Melissa Anna Boyd\"\n - name: \"Sarah A. Boyle\"\n - name: \"Tom Bradfer-Lawrence\"\n - name: \"Jennifer Bradham\"\n - name: \"Jack A. Brand\"\n - name: \"Martin I. Brengdahl\"\n - name: \"Martin Bulla\"\n - name: \"Luc Bussière\"\n - name: \"Ettore Camerlenghi\"\n - name: \"Sara E. Campbell\"\n - name: \"Leonardo L. F. Campos\"\n - name: \"Anthony Caravaggi\"\n - name: \"Pedro Cardoso\"\n - name: \"Charles J. W. Carroll\"\n - name: \"Therese A. Catanach\"\n - name: \"Xuan Chen\"\n - name: \"Heung Ying Janet Chik\"\n - name: \"Emily Sarah Choy\"\n - name: \"Alec Philip Christie\"\n - name: \"Angela Chuang\"\n - name: \"Amanda J. 
Chunco\"\n - name: \"Bethany L. Clark\"\n - name: \"Andrea Contina\"\n - name: \"Garth A Covernton\"\n - name: \"Murray P. Cox\"\n - name: \"Kimberly A. Cressman\"\n - name: \"Marco Crotti\"\n - name: \"Connor Davidson Crouch\"\n - name: \"Pietro B. D'Amelio\"\n - name: \"Alexandra Allison de Sousa\"\n - name: \"Timm Fabian Döbert\"\n - name: \"Ralph Dobler\"\n - name: \"Adam J. Dobson\"\n - name: \"Tim S. Doherty\"\n - name: \"Szymon Marian Drobniak\"\n - name: \"Alexandra Grace Duffy\"\n - name: \"Alison B. Duncan\"\n - name: \"Robert P. Dunn\"\n - name: \"Jamie Dunning\"\n - name: \"Trishna Dutta\"\n - name: \"Luke Eberhart-Hertel\"\n - name: \"Jared Alan Elmore\"\n - name: \"Mahmoud Medhat Elsherif\"\n - name: \"Holly M. English\"\n - name: \"David C. Ensminger\"\n - name: \"Ulrich Rainer Ernst\"\n - name: \"Ulrich Rainer Ernst\"\n - name: \"Stephen M. Ferguson\"\n - name: \"Esteban Fernandez-Juricic\"\n - name: \"Thalita Ferreira-Arruda Ferreira-Arruda\"\n - name: \"John Fieberg\"\n - name: \"Elizabeth A. Finch\"\n - name: \"Evan A. Fiorenza\"\n - name: \"David N. Fisher\"\n - name: \"Amélie Fontaine\"\n - name: \"Wolfgang Forstmeier\"\n - name: \"Yoan Fourcade\"\n - name: \"Graham S. Frank\"\n - name: \"Cathryn A. Freund\"\n - name: \"Eduardo Fuentes-Lillo\"\n - name: \"Sara L. Gandy\"\n - name: \"Dustin G. Gannon\"\n - name: \"Ana I. García-Cervigón\"\n - name: \"Alexis C. Garretson\"\n - name: \"Xuezhen Ge\"\n - name: \"William L. Geary\"\n - name: \"Charly Géron\"\n - name: \"Charly Géron\"\n - name: \"Marc Gilles\"\n - name: \"Antje Girndt\"\n - name: \"Daniel Gliksman\"\n - name: \"Harrison B. Goldspiel\"\n - name: \"Dylan G. E. Gomes\"\n - name: \"Megan Kate Good\"\n - name: \"Sarah C. Goslee\"\n - name: \"J. Stephen Gosnell\"\n - name: \"Eliza M. Grames\"\n - name: \"Paolo Gratton\"\n - name: \"Nicholas M. Grebe\"\n - name: \"Skye M. Greenler\"\n - name: \"Maaike Griffioen\"\n - name: \"Daniel M. Griffith\"\n - name: \"Frances J. 
Griffith\"\n - name: \"Jake J. Grossman\"\n - name: \"Ali Güncan\"\n - name: \"Stef Haesen\"\n - name: \"James G. Hagan\"\n - name: \"Heather A. Hager\"\n - name: \"Jonathan Philo Harris\"\n - name: \"Natasha Dean Harrison\"\n - name: \"Sarah Syedia Hasnain\"\n - name: \"Justin Chase Havird\"\n - name: \"Andrew J. Heaton\"\n - name: \"María Laura Herrera-Chaustre\"\n - name: \"Tanner J. Howard\"\n - name: \"Bin-Yan Hsu\"\n - name: \"Fabiola Iannarilli\"\n - name: \"Esperanza C. Iranzo\"\n - name: \"Erik N. K. Iverson\"\n - name: \"Saheed Olaide Jimoh\"\n - name: \"Saheed Olaide Jimoh\"\n - name: \"Douglas H. Johnson\"\n - name: \"Martin Johnsson\"\n - name: \"Jesse Jorna\"\n - name: \"Tommaso Jucker\"\n - name: \"Martin Jung\"\n - name: \"Ineta Kačergytė\"\n - name: \"Oliver Kaltz\"\n - name: \"Alison Ke\"\n - name: \"Clint D. Kelly\"\n - name: \"Katharine Keogan\"\n - name: \"Friedrich Wolfgang Keppeler\"\n - name: \"Alexander K. Killion\"\n - name: \"Dongmin Kim\"\n - name: \"David P. Kochan\"\n - name: \"Peter Korsten\"\n - name: \"Shan Kothari\"\n - name: \"Jonas Kuppler\"\n - name: \"Jillian M. Kusch\"\n - name: \"Malgorzata Lagisz\"\n - name: \"Kristen Marianne Lalla\"\n - name: \"Daniel J. Larkin\"\n - name: \"Courtney L. Larson\"\n - name: \"Katherine S. Lauck\"\n - name: \"M. Elise Lauterbur\"\n - name: \"Alan Law\"\n - name: \"Don-Jean Léandri-Breton\"\n - name: \"Jonas J. Lembrechts\"\n - name: \"Kiara L'Herpiniere\"\n - name: \"Eva J. P. Lievens\"\n - name: \"Daniela Oliveira de Lima\"\n - name: \"Shane Lindsay\"\n - name: \"Martin Luquet\"\n - name: \"Ross MacLeod\"\n - name: \"Kirsty H. Macphie\"\n - name: \"Kit Magellan\"\n - name: \"Magdalena M. Mair\"\n - name: \"Lisa E. Malm\"\n - name: \"Stefano Mammola\"\n - name: \"Caitlin P. Mandeville\"\n - name: \"Michael Manhart\"\n - name: \"Laura Milena Manrique-Garzon\"\n - name: \"Elina Mäntylä\"\n - name: \"Philippe Marchand\"\n - name: \"Benjamin Michael Marshall\"\n - name: \"Charles A. 
Martin\"\n - name: \"Dominic Andreas Martin\"\n - name: \"Jake Mitchell Martin\"\n - name: \"April Robin Martinig\"\n - name: \"Erin S. McCallum\"\n - name: \"Mark McCauley\"\n - name: \"Sabrina M. McNew\"\n - name: \"Scott J. Meiners\"\n - name: \"Thomas Merkling\"\n - name: \"Marcus Michelangeli\"\n - name: \"Maria Moiron\"\n - name: \"Bruno Moreira\"\n - name: \"Jennifer Mortensen\"\n - name: \"Benjamin Mos\"\n - name: \"Taofeek Olatunbosun Muraina\"\n - name: \"Penelope Wrenn Murphy\"\n - name: \"Luca Nelli\"\n - name: \"Petri Niemelä\"\n - name: \"Josh Nightingale\"\n - name: \"Gustav Nilsonne\"\n - name: \"Sergio Nolazco\"\n - name: \"Sabine S. Nooten\"\n - name: \"Jessie Lanterman Novotny\"\n - name: \"Agnes Birgitta Olin\"\n - name: \"Chris L. Organ\"\n - name: \"Kate L. Ostevik\"\n - name: \"Facundo Xavier Palacio\"\n - name: \"Matthieu Paquet\"\n - name: \"Darren James Parker\"\n - name: \"David J. Pascall\"\n - name: \"Valerie J. Pasquarella\"\n - name: \"John Harold Paterson\"\n - name: \"Ana Payo-Payo\"\n - name: \"Karen Marie Pedersen\"\n - name: \"Grégoire Perez\"\n - name: \"Kayla I. Perry\"\n - name: \"Patrice Pottier\"\n - name: \"Michael J. Proulx\"\n - name: \"Raphaël Proulx\"\n - name: \"Jessica L. Pruett\"\n - name: \"Veronarindra Ramananjato\"\n - name: \"Finaritra Tolotra Randimbiarison\"\n - name: \"Onja H. Razafindratsima\"\n - name: \"Diana J. Rennison\"\n - name: \"Federico Riva\"\n - name: \"Sepand Riyahi\"\n - name: \"Michael James Roast\"\n - name: \"Felipe Pereira Rocha\"\n - name: \"Dominique G. Roche\"\n - name: \"Cristian Román-Palacios\"\n - name: \"Michael S. Rosenberg\"\n - name: \"Jessica Ross\"\n - name: \"Freya E. Rowland\"\n - name: \"Deusdedith Rugemalila\"\n - name: \"Avery L. Russell\"\n - name: \"Suvi Ruuskanen\"\n - name: \"Patrick Saccone\"\n - name: \"Asaf Sadeh\"\n - name: \"Stephen M. 
Salazar\"\n - name: \"kris sales\"\n - name: \"Pablo Salmón\"\n - name: \"Alfredo Sánchez-Tójar\"\n - name: \"Leticia Pereira Santos\"\n - name: \"Francesca Santostefano\"\n - name: \"Hayden T. Schilling\"\n - name: \"Marcus Schmidt\"\n - name: \"Tim Schmoll\"\n - name: \"Adam C. Schneider\"\n - name: \"Allie E. Schrock\"\n - name: \"Julia Schroeder\"\n - name: \"Nicolas Schtickzelle\"\n - name: \"Nick L. Schultz\"\n - name: \"Drew A. Scott\"\n - name: \"Michael Peter Scroggie\"\n - name: \"Julie Teresa Shapiro\"\n - name: \"Nitika Sharma\"\n - name: \"Caroline L. Shearer\"\n - name: \"Diego Simón\"\n - name: \"Michael I. Sitvarin\"\n - name: \"Fabrício Luiz Skupien\"\n - name: \"Heather Lea Slinn\"\n - name: \"Grania Polly Smith\"\n - name: \"Jeremy A. Smith\"\n - name: \"Rahel Sollmann\"\n - name: \"Kaitlin Stack Whitney\"\n - name: \"Shannon Michael Still\"\n - name: \"Erica F. Stuber\"\n - name: \"Guy F. Sutton\"\n - name: \"Ben Swallow\"\n - name: \"Conor Claverie Taff\"\n - name: \"Elina Takola\"\n - name: \"Andrew J Tanentzap\"\n - name: \"Rocío Tarjuelo\"\n - name: \"Richard J. Telford\"\n - name: \"Christopher J. Thawley\"\n - name: \"Hugo Thierry\"\n - name: \"Jacqueline Thomson\"\n - name: \"Svenja Tidau\"\n - name: \"Mark C. Vanderwel\"\n - name: \"Karen J. Vanderwolf\"\n - name: \"Juliana Vélez\"\n - name: \"Diana Carolina Vergara-Florez\"\n - name: \"Brian C. Verrelli\"\n - name: \"Marcus Vinícius Vieira\"\n - name: \"Nora Villamil\"\n - name: \"Valerio Vitali\"\n - name: \"Julien Vollering\"\n - name: \"Jeffrey Walker\"\n - name: \"Xanthe J. Walker\"\n - name: \"Jonathan A. Walter\"\n - name: \"Pawel Waryszak\"\n - name: \"Ryan J. Weaver\"\n - name: \"Ronja E. M. Wedegärtner\"\n - name: \"Daniel L. Weller\"\n - name: \"Shannon Whelan\"\n - name: \"Rachel Louise White\"\n - name: \"David William Wolfson\"\n - name: \"Andrew Wood\"\n - name: \"Scott W. Yanco\"\n - name: \"Jian D. L. 
Yen\"\n - name: \"Casey Youngflesh\"\n - name: \"Giacomo Zilio\"\n - name: \"Cédric Zimmer\"\n - name: \"Gregory Mark Zimmerman\"\n - name: \"Rachel A. Zitomer\"\nbibliography: \n - ms/references.bib\n - ms/grateful-refs.bib\nnumber-sections: true\nnumber-depth: 3\ntoc-depth: 3\ntbl-cap-location: top\ndate-modified: last-modified\ngoogle-scholar: true\neditor: \n markdown: \n wrap: sentence\n---\n\n\n\n\n# Introduction\n\nOne value of science derives from its production of replicable, and thus reliable, results.\nWhen we repeat a study using the original methods we should be able to expect a similar result.\nHowever, perfect replicability is not a reasonable goal.\nEffect sizes will vary, and even reverse in sign, by chance alone [@gelman2009].\nObserved patterns can differ for other reasons as well.\nIt could be that we do not sufficiently understand the conditions that led to the original result so when we seek to replicate it, the conditions differ due to some 'hidden moderator'.\nThis hidden moderator hypothesis is described by meta-analysts in ecology and evolutionary biology as 'true biological heterogeneity' [@senior2016].\nThis idea of true heterogeneity is popular in ecology and evolutionary biology, and there are good reasons to expect it in the complex systems in which we work [@shavit2017].\nHowever, despite similar expectations in psychology, recent evidence in that discipline contradicts the hypothesis that moderators are common obstacles to replicability, as variability in results in a large 'many labs' collaboration was mostly unrelated to commonly hypothesized moderators such as the conditions under which the studies were administered [@klein2018].\nAnother possible explanation for variation in effect sizes is that researchers often present biased samples of results, thus reducing the likelihood that later studies will produce similar effect sizes [@open2015; @parker2016; @forstmeier2017; @fraser2018; @parker2023].\nIt also may be that although 
researchers did successfully replicate the conditions, the experiment, and measured variables, analytical decisions differed sufficiently among studies to create divergent results [@simonsohn2015; @silberzahn2018].\n\nAnalytical decisions vary among studies because researchers have many options.\nResearchers need to decide how to exclude possibly anomalous or unreliable data, how to construct variables, which variables to include in their models, and which statistical methods to use.\nDepending on the dataset, this short list of choices could encompass thousands or millions of possible alternative specifications [@simonsohn2015].\nHowever, researchers making these decisions presumably do so with the goal of doing the best possible analysis, or at least the best analysis within their current skill set.\nThus it seems likely that some specification options are more probable than others, possibly because they have previously been shown (or claimed) to be better, or because they are more well known.\nOf course, some of these different analyses (maybe many of them) may be equally valid alternatives.\nRegardless, on probably any topic in ecology and evolutionary biology, we can encounter differences in choices of data analysis.\nThe extent of these differences in analyses and the degree to which these differences influence the outcomes of analyses and therefore studies' conclusions are important empirical questions.\nThese questions are especially important given that many papers draw conclusions after applying a single method, or even a single statistical model, to analyze a dataset.\n\nThe possibility that different analytical choices could lead to different outcomes has long been recognized [@gelman2013], and various efforts to address this possibility have been pursued in the literature.\nFor instance, one common method in ecology and evolutionary biology involves creating a set of candidate models, each consisting of a different (though often similar) set of 
predictor variables, and then, for the predictor variable of interest, averaging the slope across all models (i.e. model averaging) [@burnham2002; @grueber2011].\nThis method reduces the chance that a conclusion is contingent upon a single model specification, though use and interpretation of this method is not without challenges [@grueber2011].\nFurther, the models compared to each other typically differ only in the inclusion or exclusion of certain predictor variables and not in other important ways, such as methods of parameter estimation.\nMore explicit examination of outcomes of differences in model structure, model type, data exclusion, or other analytical choices can be implemented through sensitivity analyses [e.g., @noble2017].\nSensitivity analyses, however, are typically rather narrow in scope, and are designed to assess the sensitivity of analytical outcomes to a particular analytical choice rather than to a large universe of choices.\nRecently, however, analysts in the social sciences have proposed extremely thorough sensitivity analysis, including 'multiverse analysis' [@steegen2016] and the 'specification curve' [@simonsohn2015], as a means of increasing the reliability of results.\nWith these methods, researchers identify relevant decision points encountered during analysis and conduct the analysis many times to incorporate many plausible decisions made at each of these points.\nThe study's conclusions are then based on a broad set of the possible analyses and so allow the analyst to distinguish between robust conclusions and those that are highly contingent on particular model specifications.\nThese are useful outcomes, but specifying a universe of possible modelling decisions is not a trivial undertaking.\nFurther, the analyst's knowledge and biases will influence decisions about the boundaries of that universe, and so there will always be room for disagreement among analysts about what to include.\nIncluding more specifications is not necessarily 
better.\nSome analytical decisions are better justified than others, and including biologically implausible specifications may undermine this process.\nRegardless, these powerful methods have yet to be adopted, and even more limited forms of sensitivity analyses are not particularly widespread.\nMost studies publish a small set of analyses and so the existing literature does not provide much insight into the degree to which published results are contingent on analytical decisions.\n\nDespite the potential major impacts of analytical decisions on variance in results, the outcomes of different individuals' data analysis choices have only recently begun to receive much empirical attention.\nThe only formal exploration of this that we were aware of when we submitted our Stage 1 manuscript were (1) an analysis in social science that asked whether male professional football (soccer) players with darker skin tone were more likely to be issued red cards (ejection from the game for rule violation) than players with lighter skin tone [@silberzahn2018] and (2) an analysis in neuroimaging which evaluated nine separate hypotheses involving the neurological responses detected with fMRI in 108 participants divided between two treatments in a decision making task [@botvinik-nezer2020].\nSeveral others have been published since [e.g., @huntington-klein2021; @schweinsberg2021; @breznau2022; @coretta2023], and we recently learned of an earlier small study in ecology [@stanton-geddes2014].\nIn the red card study, 29 teams designed and implemented analyses of a dataset provided by the study coordinators [@silberzahn2018].\nAnalyses were peer reviewed (results blind) by at least two other participating analysts; a level of scrutiny consistent with standard pre-publication peer review.\nAmong the final 29 analyses, odds-ratios varied from 0.89 to 2.93, meaning point estimates varied from having players with lighter skin tones receive more red cards (odds ratio \\< 1) to a strong effect 
of players with darker skin tones receiving more red cards (odds ratio \\> 1).\nTwenty of the 29 teams found a statistically-significant effect in the predicted direction of players with darker skin tones being issued more red cards.\nThis degree of variation in peer-reviewed analyses from identical data is striking, but the generality of this finding has only just begun to be formally investigated [e.g., @huntington-klein2021; @schweinsberg2021; @breznau2022; @coretta2023].\n\nIn the neuroimaging study, 70 teams evaluated each of the nine different hypotheses with the available fMRI data [@botvinik-nezer2020].\nThese 70 teams followed a divergent set of workflows that produced a wide range of results.\nThe rate of reporting of statistically significant support for the nine hypotheses ranged from 21$\\%$ to 84$\\%$, and for each hypothesis on average, 20$\\%$ of research teams observed effects that differed substantially from the majority of other teams.\nSome of the variability in results among studies could be explained by analytical decisions such as choice of software package, smoothing function, and parametric versus non-parametric corrections for multiple comparisons.\nHowever, substantial variability among analyses remained unexplained, and presumably emerged from the many different decisions each analyst made in their long workflows.\nSuch variability in results among analyses from this dataset and from the very different red-card dataset suggests that sensitivity of analytical outcome to analytical choices may characterize many distinct fields, as several more recent many-analyst studies also suggest [@huntington-klein2021; @schweinsberg2021; @breznau2022].\n\nTo further develop the empirical understanding of the effects of analytical decisions on study outcomes, we chose to estimate the extent to which researchers' data analysis choices drive differences in effect sizes, model predictions, and qualitative conclusions in 
ecology and evolutionary biology.\nThis is an important extension of the meta-research agenda of evaluating factors influencing replicability in ecology, evolutionary biology, and beyond [@fidler2017].\nTo examine the effects of analytical decisions, we used two different datasets and recruited researchers to analyze one or the other of these datasets to answer a question we defined.\nThe first question was \"To what extent is the growth of nestling blue tits (*Cyanistes caeruleus*) influenced by competition with siblings?\" To answer this question, we provided a dataset that includes brood size manipulations from 332 broods conducted over three years at Wytham Wood, UK. The second question was \"How does grass cover influence *Eucalyptus* spp. seedling recruitment?\" For this question, analysts used a dataset that includes, among other variables, number of seedlings in different size classes, percentage cover of different life forms, tree canopy cover, and distance from canopy edge from 351 quadrats spread among 18 sites in Victoria, Australia.\n\nWe explored the impacts of data analysts' choices with descriptive statistics and with a series of tests to attempt to explain the variation among effect sizes and predicted values of the dependent variable produced by the different analysis teams for both datasets separately.\nTo describe the variability, we present forest plots of the standardized effect sizes and predicted values produced by each of the analysis teams, estimate heterogeneity (both absolute, $\\tau^2$, and proportional, $I^2$) in effect size and predicted values among the results produced by these different teams, and calculate a similarity index that quantifies variability among the predictor variables selected for the different statistical models constructed by the different analysis teams.\nThese descriptive statistics provide the first estimates of the extent to which explanatory statistical models and their outcomes in ecology and evolutionary 
biology vary based on the decisions of different data analysts.\nWe then quantified the degree to which the variability in effect size and predicted values could be explained by (1) variation in the quality of analyses as rated by peer reviewers and (2) the similarity of the choices of predictor variables between individual analyses.\n\n# Methods\n\nThis project involved a series of steps (1-6) that began with identifying datasets for analyses and continued through recruiting independent groups of scientists to analyze the data, allowing the scientists to analyze the data as they saw fit, generating peer review ratings of the analyses (based on methods, not results), evaluating the variation in effects among the different analyses, and producing the final manuscript.\n\n## **Step 1: Select Datasets**\n\nWe used two previously unpublished datasets, one from evolutionary ecology and the other from ecology and conservation.\n\n**Evolutionary ecology**\n\nOur evolutionary ecology dataset is relevant to a sub-discipline of life-history research which focuses on identifying costs and trade-offs associated with different phenotypic conditions.\nThese data were derived from a brood-size manipulation experiment imposed on wild birds nesting in boxes provided by researchers in an intensively studied population.\nUnderstanding how the growth of nestlings is influenced by the numbers of siblings in the nest can give researchers insights into factors such as the evolution of clutch size, determination of provisioning rates by parents, and optimal levels of sibling competition [@vanderwerf1992; @dekogel1997; @royle1999; @verhulst2006; @nicolaus2009].\nData analysts were provided this dataset and instructed to answer the following question: \"To what extent is the growth of nestling blue tits (*Cyanistes caeruleus*) influenced by competition with siblings?\"\n\nResearchers conducted brood size manipulations and population monitoring of blue tits at Wytham Wood, a 380 ha woodland 
in Oxfordshire, U.K (1º 20'W, 51º 47'N).\nResearchers regularly checked approximately 1100 artificial nest boxes at the site and monitored the 330 to 450 blue tit pairs occupying those boxes in 2001-2003 during the experiment.\nNearly all birds made only one breeding attempt during the April to June study period in a given year.\nAt each blue tit nest, researchers recorded the date the first egg appeared, clutch size, and hatching date.\nFor all chicks alive at age 14 days, researchers measured mass and tarsus length and fitted a uniquely numbered, British Trust for Ornithology (BTO) aluminium leg ring.\nResearchers attempted to capture all adults at their nests between day 6 and day 14 of the chick-rearing period.\nFor these captured adults, researchers measured mass, tarsus length, and wing length and fitted a uniquely numbered BTO leg ring.\nDuring the 2001-2003 breeding seasons, researchers manipulated brood sizes using cross fostering.\nThey matched broods for hatching date and brood size and moved chicks between these paired nests one or two days after hatching.\nThey sought to either enlarge or reduce all manipulated broods by approximately one fourth.\nTo control for effects of being moved, each reduced brood had a portion of its brood replaced by chicks from the paired increased brood, and vice versa.\nNet manipulations varied from plus or minus four chicks in broods of 12 to 16 to plus or minus one chick in broods of 4 or 5.\nResearchers left approximately one third of all broods unmanipulated.\nThese unmanipulated broods were not selected systematically to match manipulated broods in clutch size or laying date.\nWe have mass and tarsus length data from 3720 individual chicks divided among 167 experimentally enlarged broods, 165 experimentally reduced broods, and 120 unmanipulated broods.\nThe full list of variables included in the dataset is publicly available (), along with the data ().\n\n::: {.callout-note appearance=\"simple\"}\n**Additional 
Explanation:** Shortly after beginning to recruit analysts, several analysts noted a small set of related errors in the blue tit dataset.\nWe corrected the errors, replaced the dataset on our OSF site, and emailed the analysts on 19 April 2020 to instruct them to use the revised data.\nThe email to analysts is available here ().\nThe errors are explained in that email.\n:::\n\n**Ecology and conservation**\n\nOur ecology and conservation dataset is relevant to a sub-discipline of conservation research which focuses on investigating how best to revegetate private land in agricultural landscapes.\nThese data were collected on private land under the Bush Returns program, an incentive system where participants entered into a contract with the Goulburn Broken Catchment Management Authority and received annual payments if they executed predetermined restoration activities.\nThis particular dataset is based on a passive regeneration initiative, where livestock grazing was removed from the property in the hopes that the *Eucalyptus* spp.\noverstorey would regenerate without active (and expensive) planting.\nAnalyses of some related data have been published [@miles2008; @vesk2016] but those analyses do not address the question analysts answered in our study.\nData analysts were provided this dataset and instructed to answer the following question: \"How does grass cover influence *Eucalyptus* spp. seedling recruitment?\".\n\nResearchers conducted three rounds of surveys at 18 sites across the Goulburn Broken catchment in northern Victoria, Australia in winter and spring 2006 and autumn 2007.\nIn each survey period, a different set of 15 x 15 m quadrats were randomly allocated across each site within 60 m of existing tree canopies.\nThe number of quadrats at each site depended on the size of the site, ranging from four at smaller sites to 11 at larger sites.\nThe total number of quadrats surveyed across all sites and seasons was 351.\nThe number of *Eucalyptus* spp. 
seedlings was recorded in each quadrat along with information on the GPS location, aspect, tree canopy cover, distance to tree canopy, and position in the landscape.\nGround layer plant species composition was recorded in three 0.5 x 0.5 m sub-quadrats within each quadrat.\nSubjective cover estimates of each species as well as bare ground, litter, rock and moss/lichen/soil crusts were recorded.\nSubsequently, this was augmented with information about the precipitation and solar radiation at each GPS location.\nThe full list of variables included in the dataset is publicly available (), along with the data ().\n\n## **Step 2: Recruitment and Initial Survey of Analysts**\n\nThe lead team (TP, HF, SN, EG, SG, PV, DH, FF) created a publicly available document providing a general description of the project ().\nThe project was advertised at conferences, via Twitter, using mailing lists for ecological societies (including Ecolog, Evoldir, and lists for the Environmental Decisions Group, and Transparency in Ecology and Evolution), and via word of mouth.\nThe target population was active ecology, conservation, or evolutionary biology researchers with a graduate degree (or currently studying for a graduate degree) in a relevant discipline.\nResearchers could choose to work independently or in a small team.\nFor the sake of simplicity, we refer to these as 'analysis teams' though some comprised one individual.\nWe aimed for a minimum of 12 analysis teams independently evaluating each dataset (see sample size justification below).\nWe simultaneously recruited volunteers to peer review the analyses conducted by the other volunteers through the same channels.\nOur goal was to recruit a similar number of peer reviewers and analysts, and to ask each peer reviewer to review a minimum of four analyses.\nIf we were unable to recruit at least half the number of reviewers as analysis teams, we planned to ask analysts to serve also as reviewers (after they had completed their 
analyses), but this was unnecessary.\nTherefore, no data analysts peer reviewed analyses of the dataset they had analyzed.\nAll analysts and reviewers were offered the opportunity to share co-authorship on this manuscript and we planned to invite them to participate in the collaborative process of producing the final manuscript.\nAll analysts signed \\[digitally\\] a consent (ethics) document () approved by the Whitman College Institutional Review Board prior to being allowed to participate.\n\n::: {.callout-note appearance=\"simple\"}\n**Preregistration Deviation:**\n\nDue to the large number of recruited analysts and reviewers and the anticipated challenges of receiving and integrating feedback from so many authors, we limited analyst and reviewer participation in the production of the final manuscript to an invitation to call attention to serious problems with the manuscript draft.\n:::\n\nWe identified our minimum number of analysts per dataset by considering the number of effects needed in a meta-analysis to generate an estimate of heterogeneity ($\\tau^{2}$) with a 95$\\%$ confidence interval that does not encompass zero.\nThis minimum sample size is invariant regardless of $\\tau^{2}$.\nThis is because the same t-statistic value will be obtained by the same sample size regardless of variance ($\\tau^{2}$).\nWe see this by first examining the formula for the standard error, SE for variance, ($\\tau^{2}$) or SE($\\tau^{2}$) assuming normality in an underlying distribution of effect sizes [@knight2000]:\n\n$$\nSE({{τ}^2})=\\sqrt{\\frac{2{τ}^4}{n-1}}\n$$ {#eq-SE-tau}\n\nand then rearranging the above formula to show how the t-statistic is independent of $\\tau^2$, as seen below.\n\n$$\nt=\\frac{{τ}^2}{SE({{τ}^2})}=\\sqrt{\\frac{n-1}{2}}\n$$ {#eq-t-tau}\n\nWe then find a minimum n = 12 according to this formula.\n\n## **Step 3: Primary Data Analyses**\n\nAnalysis teams registered and answered a demographic and expertise survey ().\nWe then provided them with the 
dataset of their choice and requested that they answer a specific research question.\nFor the evolutionary ecology dataset that question was \"To what extent is the growth of nestling blue tits (*Cyanistes caeruleus*) influenced by competition with siblings?\" and for the conservation ecology dataset it was \"How does grass cover influence *Eucalyptus* spp. seedling recruitment?\" Once their analysis was complete, they answered a structured survey (), providing analysis technique, explanations of their analytical choices, quantitative results, and a statement describing their conclusions.\nThey also were asked to upload their analysis files (including the dataset as they formatted it for analysis and their analysis code \\[if applicable\\]) and a detailed journal-ready statistical methods section.\n\n::: {.callout-note appearance=\"simple\"}\n**Additional Explanation:** \n\nAs is common in many studies in ecology and evolutionary biology, the datasets we provided contained many variables, and the research questions we provided could be addressed by our datasets in many different ways. 
For instance, volunteer analysts had to choose the dependent (response) variable and the independent variable, and make numerous other decisions about which variables and data to use and how to structure their model.\n:::\n\n::: {.callout-note appearance=\"simple\"}\n**Preregistration Deviation:**\n\nWe originally planned to have analysts complete a single survey (), but after we evaluated the results of that survey, we realized we would need a second survey () to adequately collect the information we needed to evaluate heterogeneity of results (step 5).\nWe provided a set of detailed instructions with the follow-up survey, and these instructions are publicly available and can be found within the following files (blue tit: , *Eucalyptus*: ).\n:::\n\n## **Step 4: Peer Reviews of Analyses**\n\nAt minimum, each analysis was evaluated by four different reviewers, and each volunteer peer reviewer was randomly assigned methods sections from at least four analyst teams (the exact number varied).\nEach peer reviewer registered and answered a demographic and expertise survey identical to that asked of the analysts, except we did not ask about 'team name' since reviewers did not work in teams.\nReviewers evaluated the methods of each of their assigned analyses one at a time in a sequence determined by the project leaders.\nWe systematically assigned the sequence so that, if possible, each analysis was allocated to each position in the sequence for at least one reviewer.\nFor instance, if each reviewer were assigned four analyses to review, then each analysis would be the first analysis assigned to at least one reviewer, the second analysis assigned to another reviewer, the third analysis assigned to yet another reviewer, and the fourth analysis assigned to a fourth reviewer.\nBalancing the order in which reviewers saw the analyses controls for order effects, e.g. 
a reviewer might be less critical of the first methods section they read than the last.\n\nThe process for a single reviewer was as follows.\nFirst, the reviewer received a description of the methods of a single analysis.\nThis included the narrative methods section, the analysis team's answers to our survey questions regarding their methods, including analysis code, and the dataset.\nThe reviewer was then asked, in an online survey (), to rate that analysis on a scale of 0-100 based on this prompt: \"Rate the overall appropriateness of this analysis to answer the research question (*one of the two research questions inserted here*) with the available data. To help you calibrate your rating, please consider the following guidelines:\n\n>\n- 100. A perfect analysis with no conceivable improvements from the reviewer\n- 75. An imperfect analysis but the needed changes are unlikely to dramatically alter outcomes\n- 50. A flawed analysis likely to produce either an unreliable estimate of the relationship or an over-precise estimate of uncertainty\n- 25. A flawed analysis likely to produce an unreliable estimate of the relationship and an over-precise estimate of uncertainty\n- 0. A dangerously misleading analysis, certain to produce both an estimate that is wrong and a substantially over-precise estimate of uncertainty that places undue confidence in the incorrect estimate.\n>\n\\*Please note that these values are meant to calibrate your ratings.\nWe welcome ratings of any number between 0 and 100.\n\nAfter providing this rating, the reviewer was presented with this prompt, in multiple-choice format: \"Would the analytical methods presented produce an analysis that is (a) publishable as is, (b) publishable with minor revision, (c) publishable with major revision, (d) deeply flawed and unpublishable?\" The reviewer was then provided with a series of text boxes and the following prompts: \"Please explain your ratings of this analysis. 
Please evaluate the choice of statistical analysis type. Please evaluate the process of choosing variables for and structuring the statistical model. Please evaluate the suitability of the variables included in (or excluded from) the statistical model. Please evaluate the suitability of the structure of the statistical model. Please evaluate choices to exclude or not exclude subsets of the data. Please evaluate any choices to transform data (or, if there were no transformations, but you think there should have been, please discuss that choice).\" After submitting this review, a methods section from a second analysis was then made available to the reviewer.\nThis same sequence was followed until all analyses allocated to a given reviewer were provided and reviewed.\nAfter providing the final review, the reviewer was simultaneously provided with all four (or more) methods sections the reviewer had just completed reviewing, the option to revise their original ratings, and a text box to provide an explanation.\nThe invitation to revise the original ratings was as follows: \"If, now that you have seen all the analyses you are reviewing, you wish to revise your ratings of any of these analyses, you may do so now.\" The text box was prefaced with this prompt: \"Please explain your choice to revise (or not to revise) your ratings.\"\n\n::: {.callout-note appearance=\"simple\"}\n**Additional explanation:** Unregistered analysis.\n\nTo determine how consistent peer reviewers were in their ratings, we assessed inter-rater reliability among reviewers for both the categorical and quantitative ratings combining blue tit and *Eucalyptus* data using Krippendorff's alpha for ordinal and continuous data respectively.\nThis provides a value that is between -1 (total disagreement between reviewers) and 1 (total agreement between reviewers).\n:::\n\n## **Step 5: Evaluate Variation**\n\nThe lead team conducted the analyses outlined in this section.\nWe described the variation in model 
specification in several ways.\nWe calculated summary statistics describing variation among analyses, including mean, SD, and range of number of variables per model included as fixed effects, the number of interaction terms, the number of random effects, and the mean, SD, and range of sample sizes.\nWe also present the number of analyses in which each variable was included.\nWe summarized the variability in standardized effect sizes and predicted values of dependent variables among the individual analyses using standard random effects meta-analytic techniques.\nFirst, we derived standardized effect sizes from each individual analysis.\nWe did this for all linear models or generalized linear models by converting the $t$ value and the degrees of freedom ($df$) associated with regression coefficients (e.g. the effect of the number of siblings \[predictor\] on growth \[response\] or the effect of grass cover \[predictor\] on seedling recruitment \[response\]) to the correlation coefficient, $r$, using the following:\n\n$$\nr=\sqrt{\frac{{t}^2}{\left({{t}^2}+df\right) }}\n$$ {#eq-t-to-r}\n\nThis formula can only be applied if $t$ and $df$ values originate from linear or generalized linear models [GLMs; @nakagawa2007].\nIf, instead, linear mixed-effects models (LMMs) or generalized linear mixed-effects models (GLMMs) were used by a given analysis, the exact $df$ cannot be estimated.\nHowever, adjusted $df$ can be estimated, for example, using the Satterthwaite approximation of $df$, ${df}_S$, [note that SAS uses this approximation to obtain $df$ for LMMs and GLMMs; @luke2017].\nFor analyses using either LMMs or GLMMs that do not produce ${df}_S$, we planned to obtain ${df}_S$ by rerunning the same (G)LMMs using the `lmer()` or `glmer()` function in the *lmerTest* package in R [@kuznetsova2017; @base].\n\n::: {.callout-note appearance=\"simple\"}\n**Preregistration Deviation:**\n\nRather than re-run these analyses ourselves, we sent a follow-up survey 
(referenced above under \"Primary data analyses\") to analysts and asked them to follow our instructions for producing this information.\nThe instructions are publicly available and can be found within the following files (blue tit: , *Eucalyptus*: ).\n:::\n\nWe then used the $t$ values and $df_S$ from the models to obtain $r$ as per the formula above.\nAll $r$ and accompanying $df$ (or $df_S$) were converted to Fisher's $Z_r$ \n\n$$\nZ_r = \\frac{1}{2} \\ln(\\dfrac{1+r}{1-r})\n$$ {#eq-Zr}\n\nand its sampling variance; $1/(n – 3)$ where $n = df + 1$.\nAny analyses from which we could not derive a signed $Z_r$, for instance one with a quadratic function in which the slope changed sign, were considered unusable for analyses of $Z_r$ .\nWe expected such analyses would be rare.\nIn fact, most submitted analyses excluded from our meta-analysis of $Z_r$ were excluded because of a lack of sufficient information provided by the analyst team rather than due to the use of effects that could not be converted to $Z_r$.\nRegardless, as we describe below, we generated a second set of standardized effects (predicted values) that could (in principle) be derived from any explanatory model produced by these data.\n\nBesides $Z_r$, which describes the strength of a relationship based on the amount of variation in a dependent variable explained by variation in an independent variable, we also examined differences in the shape of the relationship between the independent and dependent variables.\nTo accomplish this, we derived a point estimate (out-of-sample predicted value) for the dependent variable of interest for each of three values of our primary independent variable.\nWe originally described these three values as associated with the 25th percentile, median, and 75th percentile of the independent variable and any covariates.\n\n::: {.callout-note appearance=\"simple\"}\n**Preregistration Deviation:** The original description of the out-of-sample specifications did not account for 
the facts that (a) some variables are not distributed in a way that allowed division in percentiles and that (b) variables could be either positively or negatively correlated with the dependent variable.\nWe provide a more thorough description here: We derived three point-estimates (out-of-sample predicted values) for the dependent variable of interest; one for each of three values of our primary independent variable that we specified.\nWe also specified values for all other variables that could have been included as independent variables in analysts' models so that we could derive the predicted values from a fully specified version of any model produced by analysts.\nFor all potential independent variables, we selected three values or categories.\nOf the three we selected, one was associated with small, one with intermediate, and one with large values of one typical dependent variable (day 14 chick weight for the blue tit data and total number of seedlings for the *Eucalyptus* data; analysts could select other variables as their dependent variable, but the others typically correlated with the two identified here).\nFor continuous variables, this means we identified the 25th percentile, median, and 75th percentile and, if the slope of the linear relationship between this variable and the typical dependent variable was positive, we left the quartiles ordered as is.\nIf, instead, the slope was negative, we reversed the order of the independent variable quartiles so that the 'lower' quartile value was the one associated with the lower value for the dependent variable.\nIn the case of categorical variables, we identified categories associated with the 25th percentile, median, and 75th percentile values of the typical dependent variable after averaging the values for each category.\nHowever, for some continuous and categorical predictors, we also made selections based on the principle of internal consistency between certain related variables, and we fixed a few 
categorical variables as identical across all three levels where doing so would simplify the modelling process (specification tables available: blue tit: ; *Eucalyptus*: ).\n:::\n\nWe used the 25th and 75th percentiles rather than minimum and maximum values to reduce the chance of occupying unrealistic parameter space.\nWe planned to derive these predicted values from the model information provided by the individual analysts.\nAll values (predictions) were first transformed to the original scale along with their standard errors (SE); we used the delta method [@verhoef2012] for the transformation of SE.\nWe used the square of the SE associated with predicted values as the sampling variance in the meta-analyses described below, and we planned to analyze these predicted values in exactly the same ways as we analyzed $Z_r$ in the following analyses.\n\n::: {.callout-note appearance=\"simple\"}\n**Preregistration Deviation:** Because analysts of blue tit data chose different dependent variables on different scales, after transforming out-of-sample values to the original scales, we standardized all values as z scores ('standard scores') to put all dependent variables on the same scale and make them comparable.\nThis involved taking each relevant value on the original scale (whether a predicted point estimate or a SE associated with that estimate) and subtracting the value in question from the mean value of that dependent variable derived from the full dataset and then dividing this difference by the standard deviation, SD, corresponding to the mean from the full dataset.\nThus, all our out-of-sample prediction values from the blue tit data are from a distribution with the mean of 0 and SD of 1.\nWe did not add this step for the *Eucalyptus* data because (a) all responses were on the same scale (counts of *Eucalyptus* stems) and were thus comparable and (b) these data, with many zeros and high skew, are poorly suited for z scores.\n:::\n\nWe plotted individual effect size 
estimates ($Z_r$) and predicted values of the dependent variable ($y_i$) and their corresponding 95$\%$ confidence / credible intervals in forest plots to allow visualization of the range and precision of effect size and predicted values.\nFurther, we included these estimates in random effects meta-analyses [@higgins2003; @borenstein2017] using the *metafor* package in R [@metafor; @base]:\n\n$$\nZ_r \sim 1 + \left(1 \vert \text{analysisID} \right)\n$$ {#eq-MA_Zr}\n\n$$ \ny_i \sim 1 + \left(1 \vert \text{analysisID} \right)\n$$ {#eq-MA_yi}\n\nwhere $y_i$ is the predicted value for the dependent variable at the 25th percentile, median, or 75th percentile of the independent variables.\nThe individual $Z_r$ effect sizes were weighted with the inverse of sampling variance for $Z_r$.\nThe individual predicted values for the dependent variable ($y_i$) were weighted by the inverse of the associated $SE^2$ (original registration omitted \"inverse of the\" in error).\nThese analyses provided an average $Z_r$ score or an average $y_i$ with corresponding 95$\%$ confidence interval and allowed us to estimate two heterogeneity indices, $\tau^2$ and $I^2$.\nThe former, $\tau^2$, is the absolute measure of heterogeneity or the between-study variance (in our case, between-effect variance) whereas $I^2$ is a relative measure of heterogeneity.\nWe obtained the estimate of relative heterogeneity ($I^2$) by dividing the between-effect variance by the sum of between-effect and within-effect variance (sampling error variance).\n$I^2$ is thus, in a standard meta-analysis, the proportion of variance that is due to heterogeneity as opposed to sampling error.\nWhen calculating $I^2$, within-study variance is amalgamated across studies to create a \"typical\" within-study variance, which serves as the sampling error variance [@higgins2003; @borenstein2017].\nOur goal here was to visualize and quantify the degree of variation among analyses in effect size estimates [@nakagawa2007].\nWe 
did not test for statistical significance.\n\n::: {.callout-note appearance=\"simple\"}\n**Additional explanation:** Our use of $I^{2}$ to quantify heterogeneity violates an important assumption, but this violation does not invalidate our use of $I^{2}$ as a metric of how much heterogeneity can derive from analytical decisions.\nIn standard meta-analysis, the statistic $I^{2}$ quantifies the proportion of variance that is greater than we would expect if differences among estimates were due to sampling error alone [@rosenberg2013].\nHowever, it is clear that this interpretation does not apply to our value of $I^{2}$ because $I^{2}$ assumes that each estimate is based on an independent sample (although these analyses can account for non-independence via hierarchical modelling), whereas all our effects were derived from largely or entirely overlapping subsets of the same dataset.\nDespite this, we believe that $I^{2}$ remains a useful statistic for our purposes.\nThis is because, in calculating $I^{2}$, we are still setting a benchmark of expected variation due to sampling error based on the variance associated with each separate effect size estimate, and we are assessing how much (if at all) the variability among our effect sizes exceeds what would be expected had our effect sizes been based on independent data.\nIn other words, our estimates can tell us how much proportional heterogeneity is possible from analytical decisions alone when sample sizes (and therefore meta-analytic within-estimate variance) are similar to the ones in our analyses.\nAmong other implications, our violation of the independent sample assumption means that we (dramatically) over-estimate the variance expected due to sampling error, and because $I^{2}$ is a proportional estimate, we thus underestimate the actual proportion of variance due to differences among analyses other than sampling error.\nHowever, correcting this underestimation would create a trivial value since we designed the study 
so that much of the variance would derive from analytic decisions as opposed to differences in sampled data.\nInstead, retaining the $I^{2}$ value as typically calculated provides a useful comparison to $I^{2}$ values from typical meta-analyses.\n\nInterpretation of $\\tau^2$ also differs somewhat from traditional meta-analysis, and we discuss this further in the Results.\n:::\n\nFinally, we assessed the extent to which deviations from the meta-analytic mean by individual effect sizes ($Z_r$) or the predicted values of the dependent variable ($y_i$) were explained by the peer rating of each analysis team's method section, by a measurement of the distinctiveness of the set of predictor variables included in each analysis, and by the choice of whether or not to include random effects in the model.\nThe deviation score, which served as the dependent variable in these analyses, is the absolute value of the difference between the meta-analytic mean $\\bar{Z_r}$ (or $\\bar{y_i}$) and the individual $Z_r$ (or $y_i$) estimate for each analysis.\nWe used the Box-Cox transformation on the absolute values of deviation scores to achieve an approximately normal distribution [c.f. @fanelli2013; @fanelli2017].\nWe described variation in this dependent variable with both a series of univariate analyses and a multivariate analysis.\nAll these analyses were general linear (mixed) models.\nThese analyses were secondary to our estimation of variation in effect sizes described above.\nWe wished to quantify relationships among variables, but we had no a priori expectation of effect size and made no dichotomous decisions about statistical significance.\n\n::: {#box-weight-deviation .callout-note appearance=\"simple\"}\n**Additional Explanation:** \n\nIn our meta-analyses based on Box-Cox transformed deviation scores, we leave these deviation scores unweighted. \nThis is consistent with our registration, which did not mention weighting these scores. 
However, the fact that we did not mention weighting the scores was actually an error: we had intended to weight them, as is standard in meta-analysis, using the inverse variance of the Box-Cox transformed deviation scores [@eq-folded-variance].\nUnfortunately, when we did conduct the weighted analyses, they produced results in which some weighted estimates differed radically from the unweighted estimate because the weights were invalid. \nSuch invalid weights can sometimes occur when the variance (upon which the weights depend) is partly a function of the effect size, as in our Box-Cox transformed deviation scores [@nakagawa2022]. \nIn the case of the *Eucalyptus* analyses, the most extreme outlier was weighted much more heavily (by close to two orders of magnitude) than any other effect sizes because the effect size was, itself, so high. \nTherefore, we made the decision to avoid weighting by inverse variance in all analyses of the Box-Cox transformed deviation scores. \nThis was further justified because (a) most analyses have at least some moderately unreliable weights, and (b) the sample sizes were mostly very similar to each other across submitted analyses, and so meta-analytic weights are not particularly important here.\nWe systematically investigated the impact of different weighting schemes and random effects on model convergence and results, see @sec-post-hoc-weights-analysis for more details.\n:::\n\nWhen examining the extent to which reviewer ratings (on a scale from 0 to 100) explained deviation from the average effect (or predicted value), each analysis had been rated by multiple peer reviewers, so for each reviewer score to be included, we include each deviation score in the analysis multiple times.\nTo account for the non-independence of multiple ratings of the same analysis, we planned to include analysis identity as a random effect in our general linear mixed model in the *lme4* package in R [@lme4; @base].\nTo account for potential differences 
among reviewers in their scoring of analyses, we also planned to include reviewer identity as a random effect:\n\n$$ \n\\begin{align}\n\\text{DeviationScore}_j = \\text{BoxCox}(abs(\\text{DeviationFromMean}_{j})) \\\\\n{\\text{DeviationScore}}_{ij} \\sim Rating_{ij} + \\text{ReviewerID}_{i} + {\\text{AnalysisID}}_{j} \\\\\n{\\text{ReviewerID}}_i \\sim \\mathcal{N}(0,\\sigma_i^2) \\\\\n{\\text{AnalysisID}}_j \\sim \\mathcal{N}(0,\\sigma_j^2)\n\\end{align}\n$$ {#eq-deviation-rating}\n\nWhere $\\text{DeviationFromMean}_{j}$ is the deviation from the meta-analytic mean for the $j$th analysis, $\\text{ReviewerID}_{i}$ is the random intercept assigned to each $i$ reviewer, and $\\text{AnalysisID}_{j}$ is the random intercept assigned to each $j$ analysis, both of which are assumed to be normally distributed with a mean of 0 and a variance of $\\sigma^{2}$.\nAbsolute deviation scores were Box-Cox transformed using the `step_box_cox()` function from the *timetk* package in R [@timetk; @base].\n\n\n\n\n\nWe conducted a similar analysis with the four categories of reviewer ratings ((1) deeply flawed and unpublishable, (2) publishable with major revision, (3) publishable with minor revision, (4) publishable as is) set as ordinal predictors numbered as shown here.\nAs with the analyses above, we planned for these analyses to also include random effects of analysis identity and reviewer identity.\nBoth of these analyses (1: 1-100 ratings as the fixed effect, 2: categorical ratings as the fixed effects) were planned to be conducted eight times for each dataset.\nEach of the four responses ($Z_r$, $y_{25th}$, $y_{50th}$, $y_{75th}$) were to be compared once to the initial ratings provided by the peer reviewers, and again based on the revised ratings provided by the peer reviewers.\n\n::: {.callout-note appearance=\"simple\"}\n**Preregistration Deviation:**\n\n1. 
We planned to include random effects of both analysis identity and reviewer identity in these models comparing reviewer ratings with deviation scores.\n However, after we received the analyses, we discovered that a subset of analyst teams had either conducted multiple analyses and/or identified multiple effects per analysis as answering the target question.\n We therefore faced an even more complex potential set of random effects.\n We decided that including team ID, analysis ID, and effect ID along with reviewer ID as random effects in the same model would almost certainly lead to model fit problems, and so we started with simpler models including just effect ID and reviewer ID.\n However, even with this simpler structure, our dataset was sparse, with reviewers rating a small number of analyses, resulting in models with singular fit (@sec-convergence-singularity).\n Removing one of the random effects was necessary for the models to converge.\n The models that included the categorical quality rating converged when including reviewer ID, and the models that included the continuous quality rating converged when including effect ID.\n\n2. 
We conducted analyses only with the final peer ratings after the opportunity for revision, not with the initial ratings.\n    This was because when we recorded the final ratings, they overwrote the initial ratings, and so we did not have access to those initial values.\n:::\n\nThe next set of univariate analyses sought to explain deviations from the mean effects based on a measure of the distinctiveness of the set of variables included in each analysis.\nAs a 'distinctiveness' score, we used Sorensen's Similarity Index (an index typically used to compare species composition across sites), treating variables as species and individual analyses as sites.\nTo generate an individual Sorensen's value for each analysis required calculating the pairwise Sorensen's value for all pairs of analyses (of the same dataset), and then taking the average across these Sorensen's values for each analysis.\nWe calculated the Sorensen's index values using the *betapart* package [@betapart] in R:\n\n$$\n\beta_{Sorensen} = \frac{b+c}{2a+b+c}\n$$ {#eq-sorensen}\n\nwhere $a$ is the number of variables common to both analyses, $b$ is the number of variables that occur in the first analysis but not in the second, and $c$ is the number of variables that occur in the second analysis but not in the first.\nWe then used the per-model average Sorensen's index value as an independent variable to predict the deviation score in a general linear model, and included no random effect since each analysis is included only once, in R [@base]:\n\n$$ \n\text{DeviationScore}_{j} \sim \beta \text{Sorensen}_{j}\n$$ {#eq-deviation}\n\n::: {.callout-note appearance=\"simple\"}\n**Additional explanation:**\n\nWhen we planned this analysis, we anticipated that analysts would identify a single primary effect from each model, so that each model would appear in the analysis only once.\nOur expectation was incorrect because some analysts identified \>1 effect per analysis, but we still chose to specify our model as registered and not 
use a random effect.\nThis is because most models produced only one effect and so we expected that specifying a random effect to account for the few cases where \>1 effect was included for a given model would prevent model convergence.\n\nNote that this analysis contrasts with the analyses in which we used reviewer ratings as predictors because in the analyses with reviewer ratings, each effect appeared in the analysis approximately four times due to multiple reviews of each analysis, and so it was much more important to account for that variance through a random effect.\n:::\n\nNext, we assessed the relationship between the inclusion of random effects in the analysis and the deviation from the mean effect size. We anticipated that most analysts would use random effects in a mixed model framework, but if we were wrong, we wanted to evaluate the differences in outcomes when using random effects versus not using random effects. Thus if there were at least 5 analyses that did and 5 analyses that did not include random effects, we would add a binary predictor variable “random effects included (yes/no)” to our set of univariate analyses and would add this predictor variable to our multivariate model described below. This standard was only met for the *Eucalyptus* analyses, and so we only examined inclusion of random effects as a predictor variable in meta-analysis of this set of analyses. 
\n\nFinally, we conducted a multivariate analysis with the five predictors described above (peer ratings 0-100 and peer ratings of publishability 1-4; both original and revised and Sorensen's index, plus a sixth for *Eucalyptus*, presence /absence of random effects) with random effects of analysis identity and reviewer identity in the *lme4* package in R [@lme4; @base].\nWe had stated here in the text that we would use only the revised (final) peer ratings in this analysis, so the absence of the initial ratings is not a deviation from our plan:\n\n$$ \n\\begin{align}\n{\\text{DeviationScore}}_{j} \\sim {\\text{RatingContinuous}}_{ij}\\space+ \\\\\n{\\text{RatingCategorical}}_{ij} \\space + \\\\\n{\\beta\\text{Sorensen}}_{j} \\space + \\\\\n{\\text{AnalysisID}}_{j} \\space + \\\\\n{\\text{ReviewerID}}_{i} \\\\\n{\\text{ReviewerID}}_i \\sim \\mathcal{N}(0,\\sigma_i^2) \\\\\n{\\text{AnalysisID}}_j \\sim \\mathcal{N}(0,\\sigma_j^2)\n\\end{align}\n$$ {#eq-deviation-multivar}\n\nWe conducted all the analyses described above eight times; for each of the four responses ($Z_r$, $y_{25th}$, $y_{50th}$, $y_{75th}$) one time for each of the two datasets.\n\nWe have publicly archived all relevant data, code, and materials on the Open Science Framework ().\nArchived data includes the original datasets distributed to all analysts, any edited versions of the data analyzed by individual groups, and the data we analyzed with our meta-analyses, which include the effect sizes derived from separate analyses, the statistics describing variation in model structure among analyst groups, and the anonymized answers to our surveys of analysts and peer reviewers.\nSimilarly, we have archived both the analysis code used for each individual analysis (where available) and the code from our meta-analyses.\nWe have also archived copies of our survey instruments from analysts and peer reviewers.\n\nOur rules for excluding data from our study were as follows.\nWe excluded from our synthesis any 
individual analysis submitted after we had completed peer review or those unaccompanied by analysis files that allow us to understand what the analysts did.\nWe also excluded any individual analysis that did not produce an outcome that could be interpreted as an answer to our primary question (as posed above) for the respective dataset.\nFor instance, this means that in the case of the data on blue tit chick growth, we excluded any analysis that did not include something that can be interpreted as growth or size as a dependent (response) variable, and in the case of the *Eucalyptus* establishment data, we excluded any analysis that did not include a measure of grass cover among the independent (predictor) variables.\nAlso, as described above, any analysis that could not produce an effect that could be converted to a signed $Z_r$ was excluded from analyses of $Z_r$.\n\n::: {.callout-note appearance=\"simple\"}\n**Preregistration Deviation:**\n\nSome analysts had difficulty implementing our instructions to derive the out-of-sample predictions, and in some cases (especially for the *Eucalyptus* data), they submitted predictions with implausibly extreme values.\nWe believed these values were incorrect and thus made the conservative decision to exclude out-of-sample predictions where the estimates were \\> 3 standard deviations from the mean value from the full dataset.\n:::\n\n::: {.callout-note appearance=\"simple\"}\n**Additional explanation:** We conducted several unregistered analyses.\n\n**1. 
Evaluating model fit.**\n\nWe evaluated all fitted models using the `performance()` function from the *performance* package [@performance] and the `glance()` function from the *broom.mixed* package [@broommixed].\nFor all models, we calculated the square root of the residual variance (Sigma) and the root mean squared error (RMSE).\nFor GLMMs `performance()` calculates the marginal and conditional $R^2$ values as well as the contribution of random effects (ICC), based on Nakagawa et al. [-@nakagawa2017].\nThe conditional $R^2$ accounts for both the fixed and random effects, while the marginal $R^2$ considers only the variance of the fixed effects.\nThe contribution of random effects is obtained by subtracting the marginal $R^2$ from the conditional $R^2$.\n\n**2. Exploring outliers and analysis quality.**\n\nAfter seeing the forest plots of $Z_r$ values and noticing the existence of a small number of extreme outliers, especially from the *Eucalyptus* analyses, we wanted to understand the degree to which our heterogeneity estimates were influenced by these outliers.\nTo explore this question, we removed the highest two and lowest two values of $Z_r$ in each dataset and re-calculated our heterogeneity estimates.\n\nTo help understand the possible role of the quality of analyses in driving the heterogeneity we observed among estimates of $Z_r$, we created forest plots and recalculated our heterogeneity estimates after removing all effects from analysis teams that had received at least one rating of \"deeply flawed and unpublishable\" and then again after removing all effects from analysis teams with at least one rating of either \"deeply flawed and unpublishable\" or \"publishable with major revisions\".\nWe also used self-identified levels of statistical expertise to examine heterogeneity when we retained analyses only from analysis teams that contained at least one member who rated themselves as \"highly proficient\" or \"expert\" (rather than \"novice\" or 
\"moderately proficient\") in conducting statistical analyses in their research area in our intake survey.\nAdditionally, to assess potential impacts of highly collinear predictor variables on estimates of $Z_r$ in blue tit analyses, we created forest plots and recalculated our heterogeneity estimates after we removed analyses that contained the brood count after manipulation and the highly correlated (correlation of 0.89, @fig-ggpairs-bt) brood count at day 14. This removal included the one effect based on a model that contained both these variables and a third highly correlated variable, the estimate of number of chicks fledged (the only model that included the estimate of number of chicks fledged). We did not conduct a similar analysis for the *Eucalyptus* dataset because there were no variables highly collinear with the primary predictors (grass cover variables) in that dataset (@fig-ggpairs-eucalyptus). \n\n**3. Exploring possible impacts of lower quality estimates of degrees of freedom.**\n\nOur meta-analyses of variation in $Z_r$ required variance estimates derived from estimates of the degrees of freedom in original analyses from which $Z_r$ estimates were derived.\nWhile processing the estimates of degrees of freedom submitted by analysts, we identified a subset of these estimates in which we had lower confidence because two or more effects from the same analysis were submitted with identical degrees of freedom.\nWe therefore conducted a second set of (more conservative) meta-analyses that excluded these $Z_r$ estimates with identical estimates of degrees of freedom and we present these analyses in the supplement.\n:::\n\n::: {.callout-note appearance=\"simple\"}\n**Additional explanation:** Best practices in many-analysts research.\n\nAfter we initiated our project, a paper was published outlining best practices in many-analysts studies [@aczel2021].\nAlthough we did not have access to this document when we implemented our project, our study complies with 
these practices nearly completely.\nThe one exception is that although we requested analysis code from analysts, we did not require submission of code.\n:::\n\n## **Step 6: Facilitated Discussion and Collaborative Write-Up of Manuscript**\n\nWe planned for analysts and initiating authors to discuss the limitations, results, and implications of the study and collaborate on writing the final manuscript for review as a stage-2 Registered Report.\n\n::: {.callout-note appearance=\"simple\"}\n**Preregistration Deviation:** As described above, due to the large number of recruited analysts and reviewers and the anticipated challenges of receiving and integrating feedback from so many authors, we limited analyst and reviewer participation in the production of the final manuscript to an invitation to call attention to serious problems with the manuscript draft.\n:::\n\nWe built an R package, `ManyEcoEvo` to conduct the analyses described in this study [@ManyEcoEvo], which can be downloaded from [https://github.com/egouldo/ManyEcoEvo/](https://github.com/egouldo/ManyEcoEvo) to reproduce our analyses or replicate the analyses described here using alternate datasets. Data cleaning and preparation of analysis-data, as well as the analysis, is conducted in R [@base] reproducibly using the `targets` package [@targets]. This data and analysis pipeline is stored in the `ManyEcoEvo` package repository and its outputs are made available to users of the package when the library is loaded. \n\nThe full manuscript, including further analysis and presentation of results is written in Quarto [@AllaireQuarto2024]. The source code to reproduce the manuscript is hosted at [https://github.com/egouldo/ManyAnalysts/](https://github.com/egouldo/ManyAnalysts/), and the rendered version of the source code may be viewed at [https://egouldo.github.io/ManyAnalysts/](https://egouldo.github.io/ManyAnalysts/). 
All R packages and their versions used in the production of the manuscript are listed at @sec-sesion-info.\n\n\n# Results\n\n\n::: {.cell}\n\n:::\n\n\n## Summary Statistics\n\nIn total, 173 analyst teams, comprising 246 analysts, contributed 182 usable analyses (compatible with our meta-analyses and provided with all information needed for inclusion) of the two datasets examined in this study which yielded 215 effects.\nAnalysts produced 134 distinct effects that met our criteria for inclusion in at least one of our meta-analyses for the blue tit dataset.\nAnalysts produced 81 distinct effects meeting our criteria for inclusion for the *Eucalyptus* dataset.\nExcluded analyses and effects either did not answer our specified biological questions, were submitted with insufficient information for inclusion in our meta-analyses, or were incompatible with production of our effect size(s).\nWe expected cases of this final scenario (incompatible analyses), for instance we cannot extract a $Z_r$ from random forest models, which is why we analyzed two distinct types of effects, $Z_r$ and out-of-sample.\nEffects included in only a subset of our meta-analyses provided sufficient information for inclusion in only that subset (see @tbl-Table1).\nFor both datasets, most submitted analyses incorporated mixed effects.\nSubmitted analyses of the blue tit dataset typically specified normal error and analyses of the *Eucalyptus* dataset typically specified a non-normal error distribution (@tbl-Table1).\n\nFor both datasets, the composition of models varied substantially in regards to the number of fixed and random effects, interaction terms, and the number of data points used, and these patterns differed somewhat between the blue tit and *Eucalyptus* analyses (See @tbl-Table2).\nFocussing on the models included in the $Z_r$ analyses (because this is the larger sample), blue tit models included a similar number of fixed effects on average (mean 5.2 $\\pm$ 2.92 SD, range: 1 to 19) as 
*Eucalyptus* models (mean 5.01 $\\pm$ 3.83 SD, range: 1 to 13), but the standard deviation in number of fixed effects was somewhat larger in the *Eucalyptus* models.\nThe average number of interaction terms was much larger for the blue tit models (mean 0.44 $\\pm$ 1.11 SD, range: 0 to 10) than for the *Eucalyptus* models (mean 0.16 $\\pm$ 0.65 SD, range: 0 to 5), but still under 0.5 for both, indicating that most models did not contain interaction terms.\nBlue tit models also contained more random effects (mean 3.53 $\\pm$ 2.08 SD, range: 0 to 10) than *Eucalyptus* models (mean 1.41 $\\pm$ 1.09 SD, range: 0 to 4).\nThe maximum possible sample size in the blue tit dataset (3720 nestlings) was an order of magnitude larger than the maximum possible in the *Eucalyptus* dataset (351 plots), and the means and standard deviations of the sample size used to derive the effects eligible for our study were also an order of magnitude greater for the blue tit dataset (mean 2611.09 $\\pm$ 937.48 SD, range: 76 to 3720) relative to the *Eucalyptus* models (mean 298.43 $\\pm$ 106.25 SD, range: 18 to 351).\nHowever, the standard deviation in sample size from the *Eucalyptus* models was heavily influenced by a few cases of dramatic sub-setting (described below).\nApproximately three quarters of *Eucalyptus* models used sample sizes within 3$\\%$of the maximum.\nIn contrast, fewer than 20$\\%$of blue tit models relied on sample sizes within 3$\\%$of the maximum, and approximately 50$\\%$of blue tit models relied on sample sizes 29$\\%$or more below the maximum.\n\nAnalysts provided qualitative descriptions of the conclusions of their analyses.\nEach analysis team provided one conclusion per dataset.\nThese conclusions could take into account the results of any formal analyses completed by the team as well as exploratory and visual analyses of the data.\nHere we summarize all qualitative responses, regardless of whether we had sufficient information to use the corresponding model results 
in our quantitative analyses below.\nWe classified these conclusions into the categories summarized below (@tbl-Table4):\n\n- Mixed: some evidence supporting a positive effect, some evidence supporting a negative effect\n- Conclusive negative: negative relationship described without caveat\n- Qualified negative: negative relationship but only in certain circumstances or where analysts express uncertainty in their result\n- Conclusive none: analysts interpret the results as conclusive of no effect\n- None qualified: analysts describe finding no evidence of a relationship but they describe the potential for an undetected effect\n- Qualified positive: positive relationship described but only in certain circumstances or where analysts express uncertainty in their result\n- Conclusive positive: positive relationship described without caveat\n\nFor the blue tit dataset, most analysts concluded that there was negative relationship between measures of sibling competition and nestling growth, though half the teams expressed qualifications or described effects as mixed or absent.\nNo analysts concluded that there was a positive relationship even though some individual effect sizes were positive, apparently because all analysts who produced effects indicating positive relationships also produced effects indicating negative relationships and therefore described their results as qualified, mixed, or absent.\nFor the *Eucalyptus* dataset, there was a broader spread of conclusions with at least one analyst team providing conclusions consistent with each conclusion category.\nThe most common conclusion for the *Eucalyptus* dataset was that there was no relationship between grass cover and *Eucalyptus* recruitment (either conclusive or qualified description of no relationship), but more than half the teams concluded that there were effects; negative, positive, or mixed.\n\n\n::: {#tbl-Table4 .cell .column-page-right tbl-cap='Tallies of analysts\\' qualitative answers to the 
research questions addressed by their analyses.'}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n
DatasetMixedNegative ConclusiveNegative QualifiedNone ConclusiveNone QualifiedPositive ConclusivePositive Qualified
blue tit537274100
Eucalyptus8612191224
\n
\n```\n:::\n\n\n## Distribution of Effects\n\n### Standardized Effect Sizes ($Z_r$)\n\n\n::: {.cell}\n\n:::\n\n::: {.cell}\n\n:::\n\n\nAlthough the majority (224 of 131) of the usable $Z_r$ effects from the blue tit dataset found nestling growth decreased with sibling competition, and the meta-analytic mean $\bar{Z_r}$ (Fisher's transformation of the correlation coefficient) was convincingly negative (-0.35 $\\pm$ 0.06 95$\\%$CI), there was substantial variability in the strength and the direction of this effect.\n$Z_r$ ranged from -1.55 to 0.38, and approximately continuously from -0.93 to 0.19 (@fig-forest-plots-Zr-1 and @tbl-effects-params), and of the 224 effects with negative slopes, 177 had confidence intervals excluding 0.\nOf the 24 with positive slopes indicating increased nestling growth in the presence of more siblings, 3 had confidence intervals excluding zero (@fig-forest-plots-Zr A).\n\nMeta-analysis of the *Eucalyptus* dataset also showed substantial variability in the strength of effects as measured by $Z_r$, and unlike with the blue tits, a notable lack of consistency in the direction of effects (@fig-forest-plots-Zr-2, @tbl-effects-params).\n$Z_r$ ranged from -4.47 (@fig-specr-euc), indicating a strong tendency for reduced *Eucalyptus* seedling success as grass cover increased, to 0.39, indicating the opposite.\nAlthough the range of reported effects skewed strongly negative, this was due to a small number of substantial outliers.\nMost values of $Z_r$ were relatively small with values $\lt |0.2|$ and the meta-analytic mean effect size was close to zero (-0.09 $\\pm$ 0.12 95$\\%$CI).\nOf the 79 effects, fifty-three had confidence intervals overlapping zero, approximately a quarter (fifteen) crossed the traditional threshold of statistical significance indicating a negative relationship between grass cover and seedling success, and eleven crossed the significance threshold indicating a positive relationship between grass cover and seedling 
success (@fig-forest-plots-Zr-2).\n\n\n::: {.cell}\n\n:::\n\n::: {#fig-forest-plots-Zr .cell .preview-image .column-page-right layout-nrow=\"2\"}\n::: {.cell-output-display}\n![Blue tit analyses: Points where $Z_r$ are less than 0 indicate analyses that found a negative relationship between sibling number and nestling growth.](index_files/figure-html/fig-forest-plots-Zr-1.png){#fig-forest-plots-Zr-1 width=672}\n:::\n\n::: {.cell-output-display}\n![*Eucalyptus* analyses: Points where $Z_r$ are less than 0 indicate a negative relationship between grass cover and *Eucalyptus* seedling success.](index_files/figure-html/fig-forest-plots-Zr-2.png){#fig-forest-plots-Zr-2 width=672}\n:::\n\nForest plots of meta-analytic estimated standardized effect sizes ($Z_r$, blue circles) and their 95$\\%$confidence intervals for each effect size included in the meta-analysis model. The meta-analytic mean effect size is denoted by a black triangle and a dashed vertical line, with error bars also representing the 95$\\%$confidence interval. The solid black vertical line demarcates effect size of 0, indicating no relationship between the test variable and the response variable. 
Note that the *Eucalyptus* plot omits one extreme outlier with the value of -4.47 (@fig-specr-euc) in order to standardize the x-axes on these two panels.\n:::\n\n\n### Out-of-sample predictions $y_{i}$\n\n\n::: {.cell}\n\n:::\n\n\nAs with the effect size $Z_r$, we observed substantial variability in the size of out-of-sample predictions derived from the analysts' models.\nBlue tit predictions (@fig-forest-plot-bt-yi), which were z-score-standardised to accommodate the use of different response variables, always ranged far in excess of one standard deviation.\nIn the $y_{25}$ scenario, model predictions ranged from -1.84 to 0.41 (a range of 2.25 standard deviations), in the $y_{50}$ they ranged from -0.52 to 1.11 (a range of 1.63 standard deviations), and in the $y_{75}$ scenario they ranged from -0.03 to 1.59 (a range of 1.62 standard deviations).\nAs should be expected given the existence of both negative and positive $Z_r$ values, all three out-of-sample scenarios produced both negative and positive predictions, although as with the $Z_r$ values, there is a clear trend for scenarios with more siblings to be associated with smaller nestlings.\nThis is supported by the meta-analytic means of these three sets of predictions which were -0.66 (95$\\%$CI -0.82--0.5) for the $y_{25}$, 0.34 (95$\\%$CI 0.2-0.48) for the $y_{50}$, and 0.67 (95$\\%$CI 0.57-0.78) for the $y_{75}$.\n\n*Eucalyptus* out-of-sample predictions also varied substantially (@fig-euc-yi-forest-plot), but because they were not z-score-standardised and are instead on the original count scale, the types of interpretations we can make differ.\nThe predicted *Eucalyptus* seedling counts per 15 x 15 m plot for the $y_{25}$ scenario ranged from 0.04 to 33.66, for the $y_{50}$ scenario ranged from 0.03 to 13.02, and for the $y_{75}$ scenario they ranged from 0.05 to 21.93.\nThe meta-analytic mean predictions for these three scenarios were similar; 0.58 (95$\\%$CI 0.21-1.37) for the $y_{25}$, 1.67 (95$\\%$CI 
0.36-1.65) for the $y_{50}$, and 1.67 (95$\\%$CI 0.8-2.83) for the $y_{75}$ scenarios respectively.\n\n\n::: {.cell}\n::: {.cell-output-display}\n![Forest plot of meta-analytic estimated standardized (z-score) blue tit out-of-sample predictions, $y_i$. Circles represent individual estimates. Triangles represent the meta-analytic mean for each prediction scenario. Dark-blue points correspond to $y_{25}$ scenario, medium-blue points correspond to the $y_{50}$ scenario, while light blue points correspond to the $y_{75}$ scenario. Error bars are 95$\\%$confidence intervals.](index_files/figure-html/fig-forest-plot-bt-yi-1.png){#fig-forest-plot-bt-yi width=672}\n:::\n:::\n\n::: {.cell}\n::: {.cell-output-display}\n![Forest plot of meta-analytic estimated *Eucalyptus* out-of-sample predictions, $y_{i}$, on the response-scale (stems counts). Circles represent individual estimates. Triangles represent the meta-analytic mean for each prediction scenario. Dark-blue points correspond to $y_{25}$ scenario, medium-blue points correspond to the $y_{50}$ scenario, while light blue points correspond to the $y_{75}$ scenario. Error bars are 95% confidence intervals. Outliers (observations more than 3SD above the mean) have been removed prior to model fitting and do not appear on this figure. x-axis is truncated to approx. 40, and thus some error bars are incomplete. 
See @fig-euc-yi-forest-plot-full for full figure.](index_files/figure-html/fig-euc-yi-forest-plot-1.png){#fig-euc-yi-forest-plot width=672}\n:::\n:::\n\n\n## Quantifying Heterogeneity\n\n### Effect Sizes ($Z_r$)\n\n\n::: {.cell}\n\n:::\n\n\nWe quantified both absolute ($\\tau^{2}$) and relative ($I^{2}$) heterogeneity resulting from analytical variation.\nBoth measures suggest that substantial variability among effect sizes was attributable to the analytical decisions of analysts.\n\nThe total absolute level of variance beyond what would typically be expected due to sampling error, $\\tau^{2}$ (@tbl-effects-heterogeneity), among all usable blue tit effects was 0.08 and for *Eucalyptus* effects was 0.27.\nThis is similar to or exceeding the median value (0.105) of $\\tau^{2}$ found across 31 recent meta-analyses [calculated from the data in @yang2023].\nThe similarity of our observed values to values from meta-analyses of different studies based on different data suggest the potential for a large portion of heterogeneity to arise from analytical decisions.\nFor further discussion of interpretation of $\\tau^{2}$ in our study, please consult discussion of *post hoc* analyses below.\n\n\n::: {#tbl-effects-heterogeneity .cell tbl-cap='Heterogeneity in the estimated effects $Z_r$ for meta-analyses of: the full dataset, as well as from post hoc analyses wherein analyses with outliers are removed, analyses with effects from analysis teams with at least one \"unpublishable\" rating are excluded, analyses receiving at least one \"major revisions\" rating or worse excluded, analyses from teams with at least one analyst self-rated as \"highly proficient\" or \"expert\" in statistical analysis are included, and (blue tit only) the dataset excluding effects from analyses that included the pair of highly collinear predictors. ${\\\\tau}_\\\\text{Team}^{2}$ is the absolute heterogeneity for the random effect `Team`. 
${\\\\tau}_\\\\text{effectID}^{2}$ is the absolute heterogeneity for the random effect `effectID` nested under `Team`. `effectID` is the unique identifier assigned to each individual statistical effect submitted by an analysis team. We nested `effectID` within analysis team identity (`Team`) because analysis teams often submitted >1 statistical effect, either because they considered >1 model or because they derived >1 effect per model, especially when a model contained a factor with multiple levels that produced >1 contrast. ${\\\\tau}_\\\\text{Total}^{2}$ is the total absolute heterogeneity. ${I}_\\\\text{Total}^{2}$ is the proportional heterogeneity; the proportion of the variance among effects not attributable to sampling error, ${I}_\\\\text{Team}^{2}$ is the subset of the proportional heterogeneity due to differences among `Teams` and ${I}_\\\\text{Team, effectID}^{2}$ is subset of the proportional heterogeneity attributable to among-`effectID` differences.'}\n::: {.cell-output-display}\n\n```{=html}\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n
Dataset$${\\tau}_\\text{Total}^{2}$$$${\\tau}_\\text{Team}^{2}$$$${\\tau}_\\text{effectID}^{2}$$$${I^2}_\\text{Total}$$$${I^2}_\\text{Team}$$$${I^2}_\\text{Team, effectID}$$N.Obs
All analyses
Eucalyptus
0.270.020.2598.589%6.89%91.70%79
blue tit
0.080.030.0597.610%36.71%60.90%131
blue tit , collinearity removed0.070.040.0396.924%58.18%38.75%117
All analyses, Outliers removed
Eucalyptus
0.010.000.0166.190%19.25%46.94%75
blue tit
0.070.040.0296.841%64.63%32.21%127
Analyses receiving at least one 'Unpublishable' rating removed
Eucalyptus
0.010.010.0179.737%28.31%51.43%55
blue tit
0.080.030.0597.517%35.68%61.84%109
Analyses receiving at least one 'Unpublishable' and or 'Major Revisions' rating removed
Eucalyptus
0.030.030.0088.913%88.91%0.00%13
blue tit
0.140.010.1398.717%5.17%93.55%32
Analyses from teams with highly proficient or expert data analysts
Eucalyptus
0.580.020.5699.412%3.47%95.94%34
blue tit
0.090.030.0697.914%31.43%66.49%89
\n
\n```\n\n:::\n:::\n\n\nIn our analyses, $I^{2}$ is a plausible index of how much more variability among effect sizes we have observed, as a proportion, than we would have observed if sampling error were driving variability.\nWe discuss our interpretation of $I^{2}$ further in the methods, but in short, it is a useful metric for comparison to values from published meta-analyses and provides a plausible value for how much heterogeneity could arise in a normal meta-analysis with similar sample sizes due to analytical variability alone.\nIn our study, total $I^{2}$ for the blue tit $Z_r$ estimates was extremely large, at 97.61%, as was the *Eucalyptus* estimate (98.59% @tbl-effects-heterogeneity).\n\nAlthough the overall $I^{2}$ values were similar for both *Eucalyptus* and blue tit analyses, the relative composition of that heterogeneity differed.\nFor both datasets, the majority of heterogeneity in $Z_r$ was driven by differences among effects as opposed to differences among teams, though this was more prominent for the *Eucalyptus* dataset, where nearly all of the total heterogeneity was driven by differences among effects (91.7%) as opposed to differences among teams (6.89%) (@tbl-effects-heterogeneity).\n\n### Out-of-sample predictions ($y_{i}$)\n\n\n::: {.cell}\n\n:::\n\n\nWe observed substantial heterogeneity among out-of-sample estimates, but the pattern differed somewhat from the $Z_r$ values (@tbl-yi-heterogeneity).\nAmong the blue tit predictions, $I^{2}$ ranged from medium-high for the $y_{25}$ scenario (68.02) to low (27.5) for the $y_{75}$ scenario.\nAmong the *Eucalyptus* predictions, $I^{2}$ values were uniformly high (\\>82%).\nFor both datasets, most of the existing heterogeneity among predicted values was attributable to among-team differences, with the exception of the $y_{50}$ analysis of the *Eucalyptus* dataset.\nWe are limited in our interpretation of $\\tau^{2}$ for these estimates because, unlike for the $Z_r$ estimates, we have no benchmark 
for comparison with other meta-analyses.\n\n\n::: {#tbl-yi-heterogeneity .cell tbl-cap='Heterogeneity among the out-of-sample predictions ${y}_{i}$ for both blue tit and *Eucalyptus* datasets. ${\tau}_\text{Team}^{2}$ is the absolute heterogeneity for the random effect `Team`. ${\tau}_\text{effectID}^{2}$ is the absolute heterogeneity for the random effect `effectID` nested under `Team`. `effectID` is the unique identifier assigned to each individual statistical effect submitted by an analysis team. We nested `effectID` within analysis team identity (`Team`) because analysis teams often submitted >1 statistical effect, either because they considered >1 model or because they derived >1 effect per model, especially when a model contained a factor with multiple levels that produced >1 contrast. ${\tau}_\text{Total}^{2}$ is the total absolute heterogeneity. ${I}_\text{Total}^{2}$ is the proportional heterogeneity; the proportion of the variance among effects not attributable to sampling error, ${I}_\text{Team}^{2}$ is the subset of the proportional heterogeneity due to differences among `Teams` and ${I}_\text{Team,effectID}^{2}$ is the subset of the proportional heterogeneity attributable to among-`effectID` differences.'}\n\n```{=html}\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n
Prediction Scenario$${N}_\\text{Obs}$$$${\\tau}_\\text{Total}$$$${\\tau}_\\text{Team}^{2}$$$${\\tau}_\\text{effectID}^{2}$$$${I}_\\text{Total}^{2}$$$${I}_\\text{Team}^{2}$$$${I}_{Team, effectID}^{2}$$
blue tit
y25620.140.110.0368.02%51.55%16.48%
y50590.070.060.0150.39%45.14%5.25%
y75620.020.020.0027.50%25.88%1.62%
Eucalyptus
y25223.051.951.1088.76%56.76%32.00%
y50241.610.531.0883.26%27.52%55.73%
y75241.691.410.2879.76%66.52%13.25%
\n
\n```\n:::\n\n\n## Post-hoc Analysis: Exploring outlier characteristics and the effect of outlier removal on heterogeneity\n\n### Effect Sizes ($Z_r$)\n\n\n::: {.cell}\n\n:::\n\n\nThe outlier *Eucalyptus* $Z_r$ values were striking and merited special examination.\nThe three negative outliers had very low sample sizes and were based on either small subsets of the dataset or, in one case, extreme aggregation of data.\nThe outliers associated with small subsets had sample sizes ($n=$ 117, 90, 18) that were less than half of the total possible sample size of 351.\nThe case of extreme aggregation involved averaging all values within each of the 351 sites in the dataset.\n\n\n::: {.cell}\n\n:::\n\n\nSurprisingly, both the largest and smallest effect sizes in the blue tit analyses (@fig-forest-plots-Zr-1) come from the same analyst (anonymous ID: 'Adelong'), with identical models in terms of the explanatory variable structure, but with different response variables.\nHowever, the radical change in effect was primarily due to collinearity with covariates.\nThe primary predictor variable (brood count after manipulation) was accompanied by several collinear variables, including the highly collinear (correlation of 0.89, @fig-ggpairs-bt) covariate (brood count at day 14) in both analyses.\nIn the analysis of nestling weight, brood count after manipulation showed a strong positive partial correlation with weight after controlling for brood count at day 14 and treatment category (increased, decreased, unmanipulated).\nIn that same analysis, the most collinear covariate (the day 14 count) had a negative partial correlation with weight.\nIn the analysis with tarsus length as the response variable, these partial correlations were almost identical in absolute magnitude, but reversed in sign and so brood count after manipulation was now the collinear predictor with the negative relationship.\nThe two models were therefore very similar, but the two collinear predictors simply switched 
roles, presumably because of a subtle difference in the distribution of weight and tarsus length data.\n\nWhen we dropped the *Eucalyptus* outliers, $I^{2}$ decreased from high (98.59 $\\%$), using Higgins' [@higgins2003] suggested benchmark, to between moderate and high (66.19 $\\%$, @tbl-effects-heterogeneity).\nHowever, more notably, $\\tau^2$ dropped from 0.27 to 0.01, indicating that, once outliers were excluded, the observed variation in effects was similar to what we would expect if sampling error were driving the differences among effects (since $\\tau^2$ is the variance in addition to that driven by sampling error).\nThe interpretation of this value of $\\tau^2$ in the context of our many-analyst study is somewhat different than a typical meta-analysis, however, since in our study (especially for *Eucalyptus*, where most analyses used almost exactly the same data points), there is almost no role for sampling error in driving the observed differences among the estimates.\nThus, rather than concluding that the variability we observed among estimates (after removing outliers) was due only to sampling error [because $\\tau^2$ became small: 10$\\%$ of the median from @yang2023], we instead conclude that the observed variability, which must be due to the divergent choices of analysts rather than sampling error, is approximately of the same magnitude as what we would have expected if, instead, sampling error, and not analytical heterogeneity, were at work.\nPresumably, if sampling error had actually also been at work, it would have acted as an additional source of variability and would have led total variability among estimates to be higher.\nWith total variability higher and thus greater than expected due to sampling error alone, $\\tau^2$ would have been noticeably larger.\nConversely, dropping outliers from the set of blue tit effects did not meaningfully reduce $I^{2}$ , and only modestly reduced $\\tau^2$ (@tbl-effects-heterogeneity).\nThus, effects at the 
extremes of the distribution were much stronger contributors to total heterogeneity for effects from analyses of the *Eucalyptus* than for the blue tit dataset.\n\n\n::: {#tbl-effects-params .cell tbl-cap='Estimated mean value of the standardised correlation coefficient, $Z_r$, along with its standard error and 95$\\\\%$confidence intervals. We re-computed the meta-analysis for different post-hoc subsets of the data: All eligible effects, removal of effects from blue tit analyses that contained highly collinear predictor variables, removal of effects from analysis teams that received at least one peer rating of \"deeply flawed and unpublishable\", removal of any effects from analysis teams that received at least one peer rating of either \"deeply flawed and unpublishable\" or \"publishable with major revisions\", inclusion of only effects from analysis teams that included at least one member who rated themselves as \"highly proficient\" or \"expert\" at conducting statistical analyses in their research area.'}\n::: {.cell-output-display}\n\n```{=html}\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n
Dataset$$\\hat\\mu$$$$\\text{SE}[\\hat\\mu]$$95%CIstatisticp-value
All analyses
Eucalyptus
−0.090.06[−0.22,0.03]−1.470.14
blue tit
−0.350.03[−0.41,−0.29]−11.02<0.001
blue tit , collinearity removed−0.360.03[−0.42,−0.29]−10.97<0.001
All analyses, outliers removed
Eucalyptus
−0.030.01[−0.06,0.00]−2.230.026
blue tit
−0.360.03[−0.42,−0.30]−11.48<0.001
Analyses from teams with highly proficient or expert data analysts
Eucalyptus
−0.170.13[−0.43,0.10]−1.240.2
blue tit
−0.360.04[−0.44,−0.28]−8.93<0.001
Analyses receiving at least one 'Unpublishable' and or 'Major Revisions' rating removed
Eucalyptus
−0.040.05[−0.15,0.07]−0.770.4
blue tit
−0.370.07[−0.51,−0.23]−5.34<0.001
Analyses receiving at least one 'Unpublishable' rating removed
Eucalyptus
−0.020.02[−0.07,0.02]−1.150.3
blue tit
−0.360.03[−0.43,−0.30]−10.82<0.001
\n
\n```\n\n:::\n:::\n\n\n### Out-of-sample predictions ($y_{i}$)\n\nWe did not conduct these *post hoc* analyses on the out-of-sample predictions as the number of eligible effects was smaller and the pattern of outliers differed.\n\n## Post-hoc analysis: Exploring the effect of removing analyses with poor peer ratings on heterogeneity\n\n### Effect Sizes ($Z_r$)\n\nRemoving poorly rated analyses had limited impact on the meta-analytic means (@fig-all-forest-plots-Zr).\nFor the *Eucalyptus* dataset, the meta-analytic mean shifted from -0.09 to -0.02 when effects from analyses rated as unpublishable were removed, and to -0.04 when effects from analyses rated, at least once, as unpublishable or requiring major revisions were removed.\nFurther, the confidence intervals for all of these means overlapped each of the other means (@tbl-effects-params).\nWe saw similar patterns for the blue tit dataset, with only small shifts in the meta-analytic mean, and confidence intervals of all three means overlapping each of the other means (@tbl-effects-params).\nRefitting the meta-analysis with a fixed effect for categorical ratings also showed no indication of differences in group meta-analytic means due to peer ratings (@fig-euc-cat-ratings-MA).\n\nFor the blue tit dataset, removing poorly-rated analyses led to only negligible changes in ${I}_\\text{Total}^{2}$ and relatively minor impacts on $\\tau^{2}$ .\nHowever, for the *Eucalyptus* dataset, removing poorly-rated analyses led to notable reductions in ${I}_\\text{Total}^{2}$ and substantial reductions in $\\tau^{2}$.\nWhen including all analyses, the *Eucalyptus* ${I}_\\text{Total}^{2}$ was 98.59% and $\\tau^{2}$ was 0.27, but eliminating analyses with ratings of \"unpublishable\" reduced ${I}_\\text{Total}^{2}$ to 79.74% and $\\tau^{2}$ to 0.01, and removing also those analyses \"needing major revisions\" left ${I}_\\text{Total}^{2}$ at 88.91% and $\\tau^{2}$ at 0.03 (@tbl-effects-heterogeneity).\nAdditionally, the 
allocations of $I^{2}$ to the team versus individual effect were altered for both blue tit and *Eucalyptus* meta-analyses by removing poorly rated analyses, but in different ways.\nFor blue tit meta-analysis, between a third and two-thirds of the total $I^{2}$ was attributable to among-team variance in most analyses until both analyses rated \"unpublishable\" and analyses rated in need of \"major revision\" were eliminated, in which case almost all remaining heterogeneity was attributable to among-effect differences.\nIn contrast, for *Eucalyptus* meta-analysis, the among-team component of $I^{2}$ was less than a third until both analyses rated \"unpublishable\" and analyses rated in need of \"major revision\" were eliminated, in which case almost 90$\\%$ of heterogeneity was attributable to differences among teams.\n\n### Out-of-sample predictions $y_{i}$\n\nWe did not conduct these *post hoc* analyses on the out-of-sample predictions as the number of eligible effects was smaller and our ability to interpret heterogeneity values for these analyses was limited.\n\n## Post-hoc analysis: Exploring the effect of including only analyses conducted by analysis teams with at least one member self-rated as \"highly proficient\" or \"expert\" in conducting statistical analyses in their research area\n\n### Effect Sizes ($Z_r$)\n\nIncluding only analyses conducted by teams that contained at least one member who rated themselves as \"highly proficient\" or \"expert\" in conducting the relevant statistical methods had negligible impacts on the meta-analytic means (@tbl-effects-params), the distribution of $Z_r$ effects (@fig-forest-plot-expertise), or heterogeneity estimates (@tbl-effects-heterogeneity), which remained extremely high.\n\n### Out-of-sample predictions $y_{i}$\n\nWe did not conduct these *post hoc* analyses on the out-of-sample predictions as the number of eligible effects was smaller.\n\n## Post-hoc analysis: Exploring the effect of excluding estimates of $Z_r$ in 
which we had reduced confidence\n\nAs described in our addendum to the methods, we identified a subset of estimates of $Z_r$ in which we had less confidence because of features of the submitted degrees of freedom.\nExcluding these effects in which we had lower confidence had minimal impact on the meta-analytic mean and the estimates of total $I^{2}$ and $\\tau^{2}$ for both blue tit and *Eucalyptus* meta-analyses, regardless of whether outliers were also excluded (@tbl-Zr-exclusion-subsetting).\n\n## Post-hoc analysis: Exploring the effect of excluding effects from blue tit models that contained two highly collinear predictors\n\n### Effect Sizes ($Z_r$)\n\nExcluding effects from blue tit models that contained the two highly collinear predictors (brood count after manipulation and brood count at day 14) had negligible impacts on the meta-analytic means (@tbl-effects-params), the distribution of $Z_r$ effects (@fig-forest-plot-Zr-collinear-rm-subset), or heterogeneity estimates (@tbl-effects-heterogeneity), which remained high.\n\n### Out-of-sample predictions $y_{i}$\n\nInclusion of collinear predictors does not harm model prediction, and so we did not conduct these *post-hoc* analyses.\n\n## Explaining Variation in Deviation Scores\n\nNone of the pre-registered predictors explained substantial variation in deviation among submitted statistical effects from the meta-analytic mean (@tbl-model-summary-stats-ratings-cont, @tbl-deviation-rating-estimates).\nNote that the extremely high ${R}_\\text{Conditional}^{2}$ values from the analyses of continuous peer ratings as predictors of deviation scores are a function of the random effects, not the fixed effect of interest.\nThese high values of the ${R}_\\text{Conditional}^{2}$ result from the fact that each effect size was included in the analysis multiple times, to allow comparison with ratings from the multiple peer reviewers who reviewed each analysis, and therefore when we included effect ID as a random effect, the 
observations within each random effect category were identical.\n\n\n::: {#tbl-model-summary-stats-ratings-cont .cell .column-page-right}\n::: {.cell-output-display}\n\n```{=html}\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n
Summary metrics for registered models seeking to explain deviation (Box-Cox transformed absolute deviation scores) from the mean \\(Z_r\\) as a function of Sorensen’s Index, categorical peer ratings, and continuous peer ratings for blue tit and Eucalyptus analyses, and as a function of the presence or absence of random effects (in the analyst’s models) for Eucalyptus analyses. We report coefficient of determination, \\(R^2\\), for our models including only fixed effects as predictors of deviation, and we report \\(R^{2}_\\text{Conditional}\\), \\(R^{2}_\\text{Marginal}\\) and the intra-class correlation (ICC) from our models that included both fixed and random effects. For all our models, we calculated the residual standard deviation \\(\\sigma\\) and root mean squared error (RMSE).
Dataset$$R^2$$$${R}_\\text{Conditional}^{2}$$$${R}_\\text{Marginal}^{2}$$ICC$$\\sigma$$RMSE$$N_\\text{Obs.}$$
Deviation explained by categorical ratings
Eucalyptus
0.13191.24e-020.12091.06e+001.02e+00346
blue tit
0.09077.47e-030.08384.98e-014.83e-01473
Deviation explained by continuous ratings
Eucalyptus
0.99988.68e-300.99987.60e-036.45e-14346
blue tit
1.00008.15e-271.00001.11e-051.40e-12473
Deviation explained by Sorensen's index
Eucalyptus5.06e-04


1.14e+001.12e+00 72
blue tit6.02e-03


5.20e-015.16e-01124
Deviation explained by inclusion of random effects
Eucalyptus8.75e-08


1.12e+001.10e+00 79
blue tit2.87e-02


5.04e-015.00e-01131
\n
\n```\n\n:::\n:::\n\n::: {#tbl-deviation-rating-estimates .cell tbl-cap='Parameter estimates from models of Box-Cox transformed deviation scores as a function of continuous and categorical peer ratings, Sorensen scores, and the inclusion of random effects. Standard Errors (SE), 95$\\%$ confidence intervals (95$\\%$ CI) are reported for all estimates, while t values, degrees of freedom and p-values are presented for fixed-effects. Note that positive parameter estimates mean that as the predictor variable increases, so does the absolute value of the deviation from the meta-analytic mean.'}\n::: {.cell-output-display}\n\n```{=html}\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n
ParameterEffectsGroupCoefficientSE95%CItdfp
Deviation explained by inclusion of random effects - Eucalyptus
(Intercept)

-2.530.27[-3.06,-1.99] -9.3177<0.001
Mixed model

0.000.31[-0.60, 0.60] 0.0077>0.9
Deviation explained by Sorensen’s index - Eucalyptus
(Intercept)

-2.751.07[-4.85,-0.65] -2.57700.010
Mean Sorensen's index

0.291.54[-2.74, 3.32] 0.19700.9
Deviation explained by Sorensen’s index - blue tit
(Intercept)

-1.530.29[-2.10,-0.97] -5.30122<0.001
Mean Sorensen's index

0.410.48[-0.53, 1.36] 0.861220.4
Deviation explained by continuous ratings - Eucalyptus
(Intercept)fixed-2.530.06[-2.65,-2.41]-41.62342<0.001
RateAnalysisfixed-7e-172e-10[-4e-10,4e-10]-3e-07342>0.9
SD (Intercept)randomEffect ID 0.530.04[ 0.45, 0.62]


SD (Observations)randomResidual0.013e-04[0.01,0.01]


Deviation explained by continuous ratings - blue tit
(Intercept)fixed-1.280.02[-1.32,-1.23]-54.53469<0.001
RateAnalysisfixed-1e-151e-09[-2e-09,2e-09]-1e-06469>0.9
SD (Intercept)randomEffect ID 0.260.02[ 0.23, 0.30]


SD (Observations)randomResidual 1e-054e-07[ 1e-05,1e-05]


Deviation explained by categorical ratings - Eucalyptus
(Intercept)fixed-2.660.27[-3.18,-2.13] -9.97340<0.001
Publishable with major revisionfixed 0.290.29[-0.27, 0.85] 1.023400.3
Publishable with minor revisionfixed 0.010.28[-0.54, 0.56] 0.04340>0.9
Publishable as isfixed 0.050.31[-0.55, 0.66] 0.173400.9
SD (Intercept)randomReviewer ID 0.390.09[ 0.25, 0.61]


SD (Observations)randomResidual1.060.04[0.98,1.15]


Deviation explained by categorical ratings - blue tit
(Intercept)fixed-1.110.11[-1.33,-0.89] -9.91467<0.001
Publishable with major revisionfixed-0.190.12[-0.42, 0.04] -1.624670.10
Publishable with minor revisionfixed-0.190.12[-0.42, 0.04] -1.654670.10
Publishable as isfixed-0.130.13[-0.39, 0.12] -1.024670.3
SD (Intercept)randomReviewer ID 0.150.04[ 0.10, 0.24]


SD (Observations)randomResidual0.50.02[0.46,0.53]


\n
\n```\n\n:::\n:::\n\n\n## Deviation Scores as explained by Reviewer Ratings\n\n### Effect Sizes ($Z_r$)\n\n\n::: {.cell}\n\n:::\n\n\nWe obtained reviews from 819 reviewers who reviewed analyses for a mean of 3.27 (range 1 - 11) analysis teams.\nAnalyses of the blue tit dataset received a total of 240 reviews, each was reviewed by a mean of 3.87 (SD 0.71, range 3-5) reviewers.\nAnalyses of the *Eucalyptus* dataset received a total of 178 reviews, each was reviewed by a mean of 4.24 (SD 0.79, range 3-6) reviewers.\nWe tested for inter-rater-reliability to examine how similarly reviewers reviewed each analysis and found approximately no agreement among reviewers.\nWhen considering continuous ratings, IRR was 0.01, and for categorical ratings, IRR was -0.14.\n\nMany of the models of deviance as a function of peer ratings faced issues of failure to converge or singularity due to sparse design matrices with our pre-registered random effects (`Effect_Id` and `Reviewer_ID`) ([see supplementary material -@tbl-explore-Zr-deviation-random-effects-structure]).\nThese issues persisted after increasing the tolerance and changing the optimizer.\nFor both *Eucalyptus* and blue tit datasets, models with continuous ratings as a predictor were singular when both pre-registered random effects were included.\n\nWhen using only categorical ratings as predictors, models converged only when specifying reviewer ID as a random effect.\nThat model had a ${R}_{C}^2$ of 0.09 and a ${R}_{M}^2$ of 0.01.\nThe model using the continuous ratings converged for each random effect (in isolation), but not with both together.\nWe present results for the model using study ID as a random effect because we expected it would be a more important driver of variation in deviation scores.\nThat model had a ${R}_{C}^2$ of 1 and a ${R}_{M}^2$ of 0.01 for the blue tit dataset and a ${R}_{C}^2$ of 1 and a ${R}_{M}^2$ of 0.01 for the *Eucalyptus* dataset.\nNeither continuous nor categorical reviewer ratings of the analyses 
meaningfully predicted deviance from the meta-analytic mean (@tbl-deviation-rating-estimates, @fig-cat-peer-rating).\nWe re-ran the multi-level meta-analysis with a fixed-effect for the categorical publishability ratings and found no difference in mean standardised effect sizes among publishability ratings (@fig-euc-cat-ratings-MA).\n\n\n::: {.cell}\n::: {.cell-output-display}\n![Violin plot of Box-Cox transformed deviation from meta-analytic mean $Z_r$ as a function of categorical peer rating. Grey points for each rating group denote model-estimated marginal mean deviation, and error bars denote 95$\\%$CI of the estimate. **A** Blue tit dataset, **B** *Eucalyptus* dataset.](index_files/figure-html/fig-cat-peer-rating-1.png){#fig-cat-peer-rating width=960}\n:::\n:::\n\n\n### Out-of-sample predictions ($y_{i}$)\n\nSome models of the influence of reviewer ratings on out-of-sample predictions ($y_{i}$) had issues with convergence and singularity of fit (see @tbl-yi-deviation-ratings-convergence-singularity) and those models that converged and were not singular showed no strong relationship (@fig-yi-deviation-cat-rating, @fig-yi-deviation-cont-rating), as with the $Zr$ analyses.\n\n## Deviation scores as explained by the distinctiveness of variables in each analysis\n\n### Effect Sizes ($Z_r$)\n\nWe employed Sorensen's index to calculate the distinctiveness of the set of predictor variables used in each model (@fig-sorensen-plots).\nThe mean Sorensen's score for blue tit analyses was 0.59 (SD: 0.1, range 0.43-0.86), and for *Eucalyptus* analyses was 0.69 (SD: 0.08, range 0.55-0.98).\n\nWe found no meaningful relationship between distinctiveness of variables selected and deviation from the meta-analytic mean (@tbl-deviation-rating-estimates, @fig-sorensen-plots) for either blue tit (mean 0.41, 95$\\%$CI -0.53,1.36) or *Eucalyptus* effects (mean 0.29, 95$\\%$CI -2.74,3.32).\n\n\n::: {#fig-sorensen-plots .cell layout-nrow=\"2\"}\n::: {.cell-output-display}\n![Blue 
tit](index_files/figure-html/fig-sorensen-plots-1.png){#fig-sorensen-plots-1 width=672}\n:::\n\n::: {.cell-output-display}\n![*Eucalyptus*](index_files/figure-html/fig-sorensen-plots-2.png){#fig-sorensen-plots-2 width=672}\n:::\n\nFitted model of the Box-Cox-transformed deviation score (deviation in effect size from meta-analytic mean) as a function of the mean Sorensen's index showing distinctiveness of the set of predictor variables. Grey ribbons on predicted values are 95$\\%$CI's.\n:::\n\n\n### Out-of-sample predictions ($y_{i}$)\n\nAs with the $Z_r$ estimates, we did not observe any convincing relationships between deviation scores of out-of-sample predictions and Sorensen's index values.\nPlease see [supplementary material -@sec-sorensen-yi].\n\n## Deviation scores as explained by the inclusion of random effects\n\n### Effect Sizes ($Z_r$)\n\nThere were only three blue tit analyses that did not include random effects, which is below the pre-registered threshold for fitting a model of the Box-Cox transformed deviation from the meta-analytic mean as a function of whether the analysis included random-effects.\nHowever, 17 *Eucalyptus* analyses included only fixed effects, which crossed our pre-registered threshold.\nConsequently, we performed this analysis for the *Eucalyptus* dataset only.\nThere was no relationship between random-effect inclusion and deviation from meta-analytic mean among the *Eucalyptus* analyses (@tbl-deviation-rating-estimates, @fig-mixed-effect-marginal-means-plot).\n\n\n::: {.cell}\n::: {.cell-output-display}\n![Violin plot of mean Box-Cox transformed deviation from meta-analytic mean as a function of random-effects inclusion in *Eucalyptus* analyses. '1' indicates random-effects were included in analyst's model, while 0 indicates no random-effects were included. White points for each group of analyses denote model-estimated marginal mean deviation, and error bars denote 95% CI of the estimate. 
](index_files/figure-html/fig-mixed-effect-marginal-means-plot-1.png){#fig-mixed-effect-marginal-means-plot width=672}\n:::\n:::\n\n\n### Out-of-sample predictions ($y_{i}$)\n\nAs with the $Z_r$ estimates, we did not examine the possibility of a relationship between the inclusion of random effects and the deviation scores of the blue tit out-of-sample predictions.\nWhen we examined the possibility of this relationship for the *Eucalyptus* effects, we found consistent evidence of somewhat higher Box-Cox-transformed deviation values for models including a random effect, meaning the models including random effects averaged slightly higher deviation from the meta-analytic means (@fig-yi-euc-deviation-RE-plots).\n\n## Multivariate Analysis Effect size ($Z_r$) and Out-of-sample predictions ($y_{i}$)\n\nLike the univariate models, the multivariate models did a poor job of explaining deviations from the meta-analytic mean.\nBecause we pre-registered a multivariate model that contained collinear predictors that produce results which are not readily interpretable, we present these models in the supplement.\nWe also had difficulty with convergence and singularity for multivariate models of out-of-sample ($y_i$) result, and had to adjust which random effects we included (@tbl-yi-multivar-singularity-convergence).\nHowever, no multivariate analyses of *Eucalyptus* out-of-sample results avoided problems of convergence or singularity, no matter which random effects we included (@tbl-yi-multivar-singularity-convergence).\nWe therefore present no multivariate *Eucalyptus* $y_i$ models.\nWe present parameter estimates from multivariate $Z_r$ models for both datasets (@tbl-multivariate-models-coefs, @tbl-multivariate-models-mod-summary) and from $y_i$ models from the blue tit dataset (@tbl-BT-yi-multivar-summary, @tbl-BT-yi-multivar-params).\nWe include interpretation of the results from these models in the supplement, but the results do not change the interpretations we present 
above based on the univariate analyses.\n\n# Discussion\n\nWhen a large pool of ecologists and evolutionary biologists analyzed the same two datasets to answer the corresponding two research questions, they produced substantially heterogeneous sets of answers.\nAlthough the variability in analytical outcomes was high for both datasets, the patterns of this variability differed distinctly between them.\nFor the blue tit dataset, there was nearly continuous variability across a wide range of $Z_r$ values.\nIn contrast, for the *Eucalyptus* dataset, there was less variability across most of the range, but more striking outliers at the tails.\nAmong out-of-sample predictions, there was again almost continuous variation across a wide range (2 SD) among blue tit estimates.\nFor *Eucalyptus*, out-of-sample predictions were also notably variable, with about half the predicted stem count values at \\<2 but the other half being much larger, and ranging to nearly 40 stems per 15 m x 15 m plot.\nWe investigated several hypotheses for drivers of this variability within datasets, but found little support for any of these.\nMost notably, even when we excluded analyses that had received one or more poor peer reviews, the heterogeneity in results largely persisted.\nRegardless of what drives the variability, the existence of such dramatically heterogeneous results when ecologists and evolutionary biologists seek to answer the same questions with the same data should trigger conversations about how ecologists and evolutionary biologists analyze data and interpret the results of their own analyses and those of others in the literature [e.g., @silberzahn2018; @simonsohn2020; @auspurg2021; @breznau2022].\n\nOur observation of substantial heterogeneity due to analytical decisions is consistent with a small earlier study in ecology [@stanton-geddes2014] and a growing body of work from the quantitative social sciences [e.g., @silberzahn2018; @botvinik-nezer2020; @huntington-klein2021; 
@schweinsberg2021; @breznau2022; @coretta2023].\nIn these studies, when volunteers from the discipline analyzed the same data, they produced a worryingly diverse set of answers to a pre-set question.\nThis diversity included a wide range of effect sizes, and in most cases, even involved effects in opposite directions.\nThus, our result should not be viewed as an anomalous outcome from two particular datasets, but instead as evidence from additional disciplines regarding the heterogeneity that can emerge from analyses of complex datasets to answer questions in probabilistic science.\nNot only is our major observation consistent with other studies, it is, itself, robust because it derived primarily from simple forest plots that we produced based on a small set of decisions that were mostly registered before data gathering and which conform to widely accepted meta-analytic practices.\n\nUnlike the strong pattern we observed in the forest plots, our other analyses, both registered and *post hoc*, produced either inconsistent patterns, weak patterns, or the absence of patterns.\nOur registered analyses found that deviations from the meta-analytic mean by individual effect sizes ($\\bar{Z_r}$) or the predicted values of the dependent variable ($\\bar{y}$) were poorly explained by our hypothesized predictors: peer rating of each analysis team's method section, a measurement of the distinctiveness of the set of predictor variables included in each analysis, or whether the model included random effects.\nHowever, in our *post hoc* analyses, we found that dropping analyses identified as unpublishable or in need of major revision by at least one reviewer modestly reduced the observed heterogeneity among the $Z_r$ outcomes, but only for *Eucalyptus* analyses, apparently because this led to the dropping of the major outlier.\nThis limited role for peer review in explaining the variability in our results should be interpreted cautiously because the inter-rater reliability among 
peer reviewers was extremely low, and at least some analyses that appeared flawed to us were not marked as flawed by reviewers. \nThus it seems that the peer reviews we received were of mixed quality, possibly due to lack of expertise or lack of care on the part of some reviewers.\nHowever, the hypothesis that poor quality analyses drove a substantial portion of the heterogeneity we observed was also contradicted by our observation that analysts' self-declared statistical expertise appeared unrelated to heterogeneity.\nWhen we retained only analyses from teams including at least one member with high self-declared levels of expertise, heterogeneity among effect sizes remained high.\nThus, our results suggest lack of statistical expertise is not the primary factor responsible for the heterogeneity we observed, although further work is merited before rejecting a role for statistical expertise.\nBesides variability in expertise, it is also possible that the volunteer analysts varied in the effort they invested, and low effort presumably drove at least some heterogeneity in results. However, analysts often submitted thoughtful and extensive code, tables, figures, and textual explanation and interpretations, which is evidence of substantial investment.\nFurther, we are confident that low effort alone is an insufficient explanation for the heterogeneity we observed because we have worked with these datasets ourselves, and we know from experience that there are countless plausible modeling alternatives that can produce a diversity of effects. 
\nAdditionally, heterogeneity in analytical outcomes differed notably between datasets, and there is no reason to expect that one set of analysts took this project less seriously than the other.\nReturning to our exploratory analyses, not surprisingly, simply dropping outlier values of $Z_r$ for *Eucalyptus* analyses, which had more extreme outliers, led to less observable heterogeneity in the forest plots, and also reductions in our quantitative measures of heterogeneity.\nWe did not observe a similar effect in the blue tit dataset because that dataset had outliers that were much less extreme and instead had more variability across the core of the distribution.\n\nOur major observations raise two broad questions: why was the variability among results so high, and why did the pattern of variability differ between our two datasets?\nOne important and plausible answer to the first question is that much of the heterogeneity derives from the lack of a precise relationship between the two biological research questions we posed and the data we provided.\nThis lack of a precise relationship between data and question creates many opportunities for different model specifications, and so may inevitably lead to varied analytical outcomes [@auspurg2021].\nHowever, we believe that the research questions we posed are consistent with the kinds of research question that ecologists and evolutionary biologists typically work from.\nWhen designing the two biological research questions, we deliberately sought to represent the level of specificity we typically see in these disciplines.\nThis level of specificity is evident when we look at the research questions posed by some recent meta-analyses in these fields:\n\n- \"how \\[does\\] urbanisation impact mean phenotypic values and phenotypic variation ... 
\\[in\\] paired urban and non-urban comparisons of avian life-history traits\" [@capilla-lasheras2022]\n\n- \"\\[what are\\] the effects of ocean acidification on the crustacean exoskeleton, assessing both exoskeletal ion content (calcium and magnesium) and functional properties (biomechanical resistance and cuticle thickness)\" [@siegel2022]\n\n- \"\\[what is\\] the extent to which restoration affects both the mean and variability of biodiversity outcomes ... \\[in\\] terrestrial restoration\" [@atkinson2022]\n\n- \"\\[does\\] drought stress \\[have\\] a negative, positive, or null effect on aphid fitness\" [@leybourne2021]\n\n- \"\\[what is\\] the influence of nitrogen-fixing trees on soil nitrous oxide emissions\" [@kou-giesbrecht2021]\n\nThere is not a single precise answer to any of these questions, nor to the questions we posed to analysts in our study.\nAnd this lack of single clear answers will obviously continue to cause uncertainty since ecologists and evolutionary biologists conceive of the different answers from the different statistical models as all being answers to the same general question.\nA possible response would be a call to avoid these general questions in favor of much more precise alternatives [@auspurg2021].\nHowever, the research community rewards researchers who pose broad questions [@simons2017], and so researchers are unlikely to narrow their scope without a change in incentives.\nFurther, we suspect that even if individual studies specified narrow research questions, other scientists would group these more narrow questions into broader categories, for instance in meta-analyses, because it is these broader and more general questions that often interest the research community.\n\nAlthough variability in statistical outcomes among analysts may be inevitable, our results raise questions about why this variability differed between our two datasets.\nWe are particularly interested in the differences in the distribution of $Z_r$ since the 
distributions of out-of-sample predictions were on different scales for the two datasets, thus limiting the value of comparisons.\nThe forest plots of $Z_r$ from our two datasets showed distinct patterns, and these differences are consistent with several alternative hypotheses.\nThe results submitted by analysts of the *Eucalyptus* dataset showed a small average (close to zero) with most estimates also close to zero (± 0.2), though about a third far enough above or below zero to cross the traditional threshold of statistical significance.\nThere were a small number of striking outliers that were very far from zero.\nIn contrast, the results submitted by analysts of the blue tit dataset showed an average much further from zero (- 0.35) and a much greater spread in the core distribution of estimates across the range of $Z_r$ values (± 0.5 from the mean), with few modest outliers.\nSo, why was there more spread in effect sizes (across the estimates that are not outliers) in the blue tit analyses relative to the *Eucalyptus* analyses?\n\nOne possible explanation for the lower heterogeneity among most *Eucalyptus* $Z_r$ effects is that weak relationships may limit the opportunities for heterogeneity in analytical outcome.\nSome evidence for this idea comes from two sets of \"many labs\" studies in psychology [@klein2014; @klein2018].\nIn these studies, many independent lab groups each replicated a large set of studies, including, for each study, the experiment, data collection, and statistical analyses.\nThese studies showed that, when the meta-analytic mean across the replications from different labs was small, there was much less heterogeneity among the outcomes than when the mean effect sizes were large [@klein2014; @klein2018].\nOf course, a weak average effect size would not prevent divergent effects in all circumstances.\nAs we saw with the *Eucalyptus* analyses, taking a radically smaller subset of the data can lead to dramatically divergent effect sizes even 
when the mean with the full dataset is close to zero.\n\nOur observation that dramatic sub-setting in the *Eucalyptus* dataset was associated with correspondingly dramatic divergence in effect sizes leads us towards another hypothesis to explain the differences in heterogeneity between the *Eucalyptus* and blue tit analysis sets.\nIt may be that when analysts often divide a dataset into subsets, the result will be greater heterogeneity in analytical outcome for that dataset.\nAlthough we saw sub-setting associated with dramatic outliers in the *Eucalyptus* dataset, nearly all other analyses of *Eucalyptus* data used close to the same set of 351 samples, and as we saw, these effects did not vary substantially.\nHowever, analysts often analyzed only a subset of the blue tit data, and as we observed, sample sizes were much more variable among blue tit effects, and the effects themselves were also much more variable.\nImportant to note here is that subsets of data may differ from each other for biological reasons, but they may also differ due to sampling error.\nSampling error is a function of sample size, and sub-samples are, by definition, smaller samples, and so more subject to variability in effects due to sampling error [@jennions2013].\n\nOther features of datasets are also plausible candidates for driving heterogeneity in analytical outcomes, including features of covariates.\nIn particular, relationships between covariates and the response variable as well as relationships between covariates and the primary independent variable (collinearity) can strongly influence the modeled relationship between the independent variable of interest and the dependent variable [@morrissey2018; @dormann2013].\nTherefore, inclusion or exclusion of these covariates can drive heterogeneity in effect sizes ($Z_r$).\nAlso, as we saw with the two most extreme $Z_r$ values from the blue tit analyses, in multivariate models with collinear predictors, extreme effects can emerge when 
estimating partial correlation coefficients due to high collinearity, and conclusions can differ dramatically depending on which relationship receives the researcher's attention.\nTherefore, differences between datasets in the presence of strong and/or collinear covariates could influence the differences in heterogeneity in results among those datasets.\n\nAlthough it is too early in the many-analyst research program to conclude which analytical decisions or which features of datasets are the most important drivers of heterogeneity in analytical outcomes, we must still grapple with the possibility that analytical outcomes may vary substantially based on the choices we make as analysts.\nIf we assume that, at least sometimes, different analysts will produce dramatically different statistical outcomes, what should we do as ecologists and evolutionary biologists?\nWe review some ideas below.\n\nThe easiest path forward after learning about this analytical heterogeneity would be simply to continue with \"business as usual\", where researchers report results from a small number of statistical models.\nA case could be made for this path based on our results.\nFor instance, among the blue tit analyses, the precise values of the estimated $Z_r$ effects varied substantially, but the average effect was convincingly different from zero, and a majority of individual effects (84%) were in the same direction.\nArguably, many ecologists and evolutionary biologists appear primarily interested in the direction of a given effect and the corresponding p-value [@fidler2006], and so the variability we observed when analyzing the blue tit dataset may not worry these researchers.\nSimilarly, most effects from the *Eucalyptus* analyses were relatively close to zero, and about two-thirds of these effects did not cross the traditional threshold of statistical significance.\nTherefore, a large proportion of people analyzing these data would conclude that there was no effect, and this is 
consistent with what we might conclude from the meta-analysis.\n\nHowever, we find the counter arguments to \"business as usual\" to be compelling.\nFor blue tits, there was a substantial minority of calculated effects that would be interpreted by many biologists as indicating the absence of an effect (28%), and there were three traditionally 'significant' effects in the opposite direction to the average.\nThe qualitative conclusions of analysts also reflected substantial variability, with fully half of teams drawing a conclusion distinct from the one we draw from the distribution as a whole.\nThese teams with different conclusions were either uncertain about the negative relationship between competition and nestling growth, or they concluded that effects were mixed or absent.\nFor the *Eucalyptus* analyses, this issue is more concerning.\nAround two-thirds of effects had confidence intervals overlapping zero, and of the third of analyses with confidence intervals excluding zero, almost half were positive, and the rest were negative.\nAccordingly, the qualitative conclusions of the *Eucalyptus* teams were spread across the full range of possibilities.\nBut even this striking lack of consensus may be much less of a problem than what could emerge as scientists select which results to publish.\n\nA potentially larger argument against \"business as usual\" is that it provides the raw material for biasing the literature.\nWhen different model specifications readily lead to different results, analysts may be tempted to report the result that appears most interesting, or that is most consistent with expectation [@gelman2013; @forstmeier2017].\nThere is growing evidence that researchers in ecology and evolutionary biology often report a biased subset of the results they produce [@deressa2023; @kimmel2023], and that this bias exaggerates the average size of effects in the published literature between 30 and 150% [@yang2023; @parker2023].\nThe bias then accumulates in 
meta-analyses, apparently more than doubling the rate of conclusions of \"statistical significance\" in published meta-analyses above what would have been found in the absence of bias [@yang2023].\nThus, \"business as usual\" does not just create noisy results, it helps create systematically misleading results.\n\nIf we move away from “business as usual”, where do we go? \nMany obvious options involve multiple analyses per dataset. \nFor instance, there is the traditional robustness or sensitivity check [e.g., @pei2020; @briga2021], in which the researcher presents several alternative versions of an analysis to demonstrate that the result is ‘robust’ [@lu2014]. \nUnfortunately, robustness checks are at risk of the same potential biases of reporting found in other studies [@silberzahn2018], especially given the relatively few models typically presented. \nHowever, these risks could be minimized by running more models and doing so with a pre-registration or registered report. \nAnother option is model averaging. \nAverages across models often perform well [e.g., @taylor2023], and in some forms this may be a relatively simple solution. \nModel averaging, as most often practiced in ecology and evolutionary biology, involves first identifying a small suite of candidate models [see @burnham2002], then using Akaike weights, based on Akaike’s Information Criterion (AIC), to calculate weighted averages for parameter estimates from those models. \nAs with typical robustness checks, the small number of models limits the exploration of specification space, but examining a larger number of models could become the norm. \nHowever, there are more concerning limitations. \nThe largest of these limitations is that averaging regression coefficients is problematic when models differ in interaction terms or collinear variables [@cade2015]. \nAdditionally, weighting by AIC may often be inconsistent with our modelling goals. 
AIC balances the trade-off between model complexity and predictive ability, but penalizing models for complexity may not be suited for testing hypotheses about causation. \nSo, AIC may often not offer the weight we want to use, and we may also not wish to just generate an average at all. \nInstead, if we hope to understand an extensive universe of possible modelling outcomes, we could conduct a multiverse analysis, possibly with a specification curve [@simonsohn2015; @simonsohn2020]. \nThis could mean running hundreds or thousands of models (or more!) to examine the distribution of possible effects, and to see how different model specification choices map onto these effects. \nHowever, exploring large areas of specification space may come at the cost of including biologically implausible specifications. \nThus, we expect a trade-off, and attempts to limit models to the most biologically plausible may become increasingly difficult in proportion to the number of variables and modeling choices. \nTo make selecting plausible models easier, one could recruit multiple analysts to design one or a few plausible specifications each as with our ‘many analyst’ study [@silberzahn2018]. \nAn alternative that may be more labor intensive for the primary analyst, but which may lead to a more plausible set of models, could involve hypothesizing about causal pathways with DAGs [directed acyclic graphs; @arif2023] to constrain the model set. \nAs with other options outlined above, generating model specifications with DAGs could be partnered with pre-registration to hinder bias from undisclosed data dredging. \n\nResponses to heterogeneity in analysis outcomes need not be limited to simply conducting more analyses, especially if it turns out that analysis quality drives some of the observed heterogeneity. 
\nAs we noted above, we cannot yet rule out the possibility that insufficient statistical expertise or poor-quality analyses might drive some portion of the heterogeneity we observed. \nImproving the quality of analyses might be accomplished with a deliberate increase in investment in statistical education. \nMany ecology and evolutionary biology students learn their statistical practice informally, with many ecology doctoral programs in the USA not requiring a statistics course [@touchon2016], and no formal courses of any kind included in doctoral degrees in most other countries. \nIn cases where formal investment in statistical education is lacking, informal resources, such as guidelines and checklists, may help researchers avoid common mistakes. \nHowever, unless following guidelines or checklists is enforced for publication, the adherence to guidelines is patchy. \nFor example, despite the publication of guidelines for conducting meta-analyses in ecology, the quality of meta-analyses did not improve substantially over time [@koricheva2014]. \nEven in medical research where adherence to guidelines such as the PRISMA standards for systematic reviews and meta-analyses is more highly valued, adherence is often poor [@page2017].\n\nAlthough we have reviewed a variety of potential responses to the existence of variability in analytical outcomes, we certainly do not wish to imply that this is a comprehensive set of possible responses. \nNor do we wish to imply that the opinions we have expressed about these options are correct. \nDetermining how the disciplines of ecology and evolutionary biology should respond to knowledge of the variability in analytical outcome will benefit from the contribution and discussion of ideas from across these disciplines. 
\nWe look forward to learning from these discussions and to seeing how these disciplines ultimately respond.\n\n# Conclusions\n\nOverall, our results suggest to us that, where there is a diverse set of plausible analysis options, no single analysis should be considered a complete or reliable answer to a research question.\nFurther, because of the evidence that ecologists and evolutionary biologists often present a biased subset of the analyses they conduct [@deressa2023; @yang2023; @kimmel2023], we do not expect that even a collection of different effect sizes from different studies will accurately represent the true distribution of effects [@yang2023].\nTherefore, we believe that an increased level of skepticism of the outcomes of single analyses, or even single meta-analyses, is warranted going forward.\nWe recognize that some researchers have long maintained a healthy level of skepticism of individual studies as part of sound and practical scientific practice, and it is possible that those researchers will be neither surprised nor concerned by our results.\nHowever, we doubt that many researchers are sufficiently aware of the potential problems of analytical flexibility to be appropriately skeptical.\nWe hope that our work leads to conversations in ecology, evolutionary biology, and other disciplines about how best to contend with heterogeneity in results that is attributable to analytical decisions.\n\n# Declarations\n\n## Ethics, consent and permissions\n\nWe obtained permission to conduct this research from the Whitman College Institutional Review Board (IRB).\nAs part of this permission, the IRB approved the consent form that all participants completed prior to joining the study.\nThe authors declare that they have no competing interests.\n\n## Availability of data and materials\n\nAll materials and data are archived and hosted on the OSF at [https://osf.io/mn5aj/](https://osf.io/mn5aj/), including survey instruments and analyst / reviewer consent forms. 
The Evolutionary Ecology Data and Ecology and Conservation Data provided to analysts are available at [https://osf.io/34fzc/](https://osf.io/34fzc/) and [https://osf.io/t76uy/](https://osf.io/t76uy/) respectively. Data has been anonymised, and the non-anonymised data is stored on the project OSF within private components accessible to the lead authors. \n\nWe built an R package, `ManyEcoEvo` to conduct the analyses described in this study [@ManyEcoEvo], which can be downloaded from [https://github.com/egouldo/ManyEcoEvo/](https://github.com/egouldo/ManyEcoEvo) to reproduce our analyses or replicate the analyses described here using alternate datasets. Data cleaning and preparation of analysis-data, as well as the analysis, is conducted in R [@base] reproducibly using the `targets` package [@targets]. This data and analysis pipeline is stored in the `ManyEcoEvo` package repository and its outputs are made available to users of the package when the library is loaded.\n\nThe full manuscript, including further analysis and presentation of results is written in Quarto [@AllaireQuarto2024]. The source code to reproduce the manuscript is hosted at [https://github.com/egouldo/ManyAnalysts/](https://github.com/egouldo/ManyAnalysts/), and the rendered version of the source code may be viewed at [https://egouldo.github.io/ManyAnalysts/](https://egouldo.github.io/ManyAnalysts/). 
All R packages and their versions used in the production of this manuscript are listed in the session info at @sec-sesion-info.\n\n## Competing interests\n\nThe authors declare that they have no competing interests\n\n## Funding\n\nEG's contributions were supported by an Australian Government Research Training Program Scholarship, AIMOS top-up scholarship (2022) and Melbourne Centre of Data Science Doctoral Academy Fellowship (2021).\nFF's contributions were supported by ARC Future Fellowship FT150100297.\n\n## Author's contributions\n\nHF, THP and FF conceptualized the project.\nPV provided raw data for *Eucalyptus* analyses and SG and THP provided raw data for blue tit analyses.\nDGH, HF and THP prepared surveys for collecting participating analysts and reviewer's data.\nEG, HF, THP, PV, SN and FF planned the analyses of the data provided by our analysts and reviewers, EG, HF, and THP curated the data, EG and HF wrote the software code to implement the analyses and prepare data visualisations.\nEG ensured that analyses were documented and reproducible.\nTHP and HF administered the project, including coordinating with analysts and reviewers.\nFF provided funding for the project.\nTHP, HF, and EG wrote the manuscript.\nAuthors listed alphabetically contributed analyses of the primary datasets or reviews of analyses.\nAll authors read and approved the final manuscript.\n\n## References {.unnumbered}\n\n::: {#refs}\n:::\n\n## Session Info {#sec-sesion-info}\n\n\n::: {#tbl-grateful-pkg-list .cell tbl-cap='R packages used to generate this manuscript. 
Please see the ManyEcoEvo package for a full list of packages used in the analysis pipeline.'}\n::: {.cell-output-display}\n\n\n|Package |Version |Citation |\n|:----------------|:--------|:----------------------|\n|base |4.4.0 |@base |\n|betapart |1.6 |@betapart |\n|broom.mixed |0.2.9.5 |@broommixed |\n|colorspace |2.1.0 |@colorspace2020a |\n|cowplot |1.1.3 |@cowplot |\n|devtools |2.4.5 |@devtools |\n|EnvStats |2.8.1 |@EnvStats-book |\n|GGally |2.2.1 |@GGally |\n|ggforestplot |0.1.0 |@ggforestplot |\n|ggh4x |0.2.8 |@ggh4x |\n|ggpubr |0.6.0 |@ggpubr |\n|ggrepel |0.9.5 |@ggrepel |\n|ggthemes |5.1.0 |@ggthemes |\n|glmmTMB |1.1.8 |@glmmTMB |\n|gt |0.10.1 |@gt |\n|gtsummary |1.7.2 |@gtsummary |\n|here |1.0.1 |@here |\n|Hmisc |5.1.2 |@Hmisc |\n|irr |0.84.1 |@irr |\n|janitor |2.2.0 |@janitor |\n|knitr |1.46 |@knitr2024 |\n|latex2exp |0.9.6 |@latex2exp |\n|lme4 |1.1.35.3 |@lme4 |\n|ManyEcoEvo |1.1.0 |@ManyEcoEvo |\n|metafor |4.6.0 |@metafor |\n|modelbased |0.8.7 |@modelbased |\n|multilevelmod |1.0.0 |@multilevelmod |\n|MuMIn |1.47.5 |@MuMIn |\n|naniar |1.1.0 |@naniar |\n|NatParksPalettes |0.2.0 |@NatParksPalettes |\n|orchaRd |2.0 |@orchaRd |\n|parameters |0.21.7 |@parameters |\n|patchwork |1.2.0 |@patchwork |\n|performance |0.11.0 |@performance |\n|renv |1.0.2 |@renv |\n|rmarkdown |2.27 |@rmarkdown2024 |\n|sae |1.3 |@molina-marhuenda:2015 |\n|scales |1.3.0 |@scales |\n|see |0.8.4 |@see |\n|showtext |0.9.7 |@showtext |\n|specr |1.0.0 |@specr |\n|targets |1.7.0 |@targets |\n|tidymodels |1.1.1 |@tidymodels |\n|tidytext |0.4.2 |@tidytext |\n|tidyverse |2.0.0 |@tidyverse |\n|withr |3.0.0 |@withr |\n|xfun |0.44 |@xfun |\n\n\n:::\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\ndevtools::session_info()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.4.0 (2024-04-24)\n os macOS Ventura 13.6.6\n system aarch64, darwin20\n ui X11\n language (EN)\n collate 
en_US.UTF-8\n ctype en_US.UTF-8\n tz Australia/Melbourne\n date 2024-07-23\n pandoc 3.1.12.2 @ /opt/homebrew/bin/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n P abind 1.4-5 2016-07-21 [?] CRAN (R 4.4.0)\n P backports 1.5.0 2024-05-23 [?] RSPM\n P base64enc 0.1-3 2015-07-28 [?] RSPM\n P bayestestR 0.13.2 2024-02-12 [?] CRAN (R 4.4.0)\n P beeswarm 0.4.0 2021-06-01 [?] CRAN (R 4.4.0)\n P blastula 0.3.5 2024-02-24 [?] RSPM\n P boot 1.3-30 2024-02-26 [?] CRAN (R 4.4.0)\n P broom * 1.0.6 2024-05-17 [?] RSPM\n P broom.helpers 1.15.0 2024-04-05 [?] CRAN (R 4.4.0)\n P broom.mixed * 0.2.9.5 2024-04-01 [?] CRAN (R 4.4.0)\n P cachem 1.1.0 2024-05-16 [?] RSPM\n P car 3.1-2 2023-03-30 [?] CRAN (R 4.4.0)\n P carData 3.0-5 2022-01-06 [?] CRAN (R 4.4.0)\n P checkmate 2.3.1 2023-12-04 [?] CRAN (R 4.4.0)\n P class 7.3-22 2023-05-03 [?] CRAN (R 4.4.0)\n P cli 3.6.2 2023-12-11 [?] RSPM\n P cluster 2.1.6 2023-12-01 [?] CRAN (R 4.4.0)\n P coda 0.19-4.1 2024-01-31 [?] CRAN (R 4.4.0)\n P codetools 0.2-20 2024-03-31 [?] CRAN (R 4.4.0)\n P colorspace 2.1-0 2023-01-23 [?] RSPM\n P commonmark 1.9.1 2024-01-30 [?] RSPM\n CompQuadForm 1.4.3 2017-04-12 [2] CRAN (R 4.4.0)\n P cowplot 1.1.3 2024-01-22 [?] CRAN (R 4.4.0)\n P curl 5.2.1 2024-03-01 [?] RSPM\n P data.table 1.15.4 2024-03-30 [?] RSPM\n P datawizard 0.11.0 2024-06-05 [?] CRAN (R 4.4.0)\n P devtools 2.4.5 2022-10-11 [?] RSPM\n P dials * 1.2.1 2024-02-22 [?] CRAN (R 4.4.0)\n P DiceDesign 1.10 2023-12-07 [?] CRAN (R 4.4.0)\n P digest 0.6.35 2024-03-11 [?] RSPM\n P dplyr * 1.1.4 2023-11-17 [?] RSPM\n P ellipsis 0.3.2 2021-04-29 [?] RSPM\n P emmeans 1.10.2 2024-05-20 [?] CRAN (R 4.4.0)\n P EnvStats 2.8.1 2023-08-22 [?] CRAN (R 4.4.0)\n P estimability 1.5.1 2024-05-12 [?] CRAN (R 4.4.0)\n P evaluate 0.24.0 2024-06-10 [?] RSPM\n P fansi 1.0.6 2023-12-08 [?] RSPM\n P farver 2.1.2 2024-05-13 [?] RSPM\n P fastmap 1.2.0 2024-05-15 [?] 
RSPM\n P forcats * 1.0.0 2023-01-29 [?] RSPM\n P foreach 1.5.2 2022-02-02 [?] CRAN (R 4.4.0)\n P foreign 0.8-86 2023-11-28 [?] CRAN (R 4.4.0)\n P Formula 1.2-5 2023-02-24 [?] CRAN (R 4.4.0)\n P fs 1.6.4 2024-04-25 [?] RSPM\n P furrr 0.3.1 2022-08-15 [?] RSPM\n P future 1.33.2 2024-03-26 [?] RSPM\n P future.apply 1.11.2 2024-03-28 [?] CRAN (R 4.4.0)\n P generics 0.1.3 2022-07-05 [?] RSPM\n P ggbeeswarm 0.7.2 2023-04-29 [?] CRAN (R 4.4.0)\n ggeffects 1.6.0 2024-05-18 [2] CRAN (R 4.4.0)\n P ggforestplot 0.1.0 2024-06-16 [?] Github (NightingaleHealth/ggforestplot@547617e)\n P ggplot2 * 3.5.1 2024-04-23 [?] RSPM\n P ggpubr 0.6.0 2023-02-10 [?] CRAN (R 4.4.0)\n P ggsignif 0.6.4 2022-10-13 [?] CRAN (R 4.4.0)\n P globals 0.16.3 2024-03-08 [?] RSPM\n P glue 1.7.0 2024-01-09 [?] RSPM\n P gower 1.0.1 2022-12-22 [?] CRAN (R 4.4.0)\n P GPfit 1.0-8 2019-02-08 [?] CRAN (R 4.4.0)\n P grateful * 0.2.4 2023-10-22 [?] CRAN (R 4.4.0)\n P gridExtra 2.3 2017-09-09 [?] CRAN (R 4.4.0)\n P gt * 0.10.1 2024-01-17 [?] RSPM\n P gtable 0.3.5 2024-04-22 [?] RSPM\n P gtsummary 1.7.2 2023-07-15 [?] CRAN (R 4.4.0)\n P hardhat 1.4.0 2024-06-02 [?] CRAN (R 4.4.0)\n P haven 2.5.4 2023-11-30 [?] RSPM\n P here * 1.0.1 2020-12-13 [?] RSPM\n P Hmisc * 5.1-3 2024-05-28 [?] CRAN (R 4.4.0)\n P hms 1.1.3 2023-03-21 [?] RSPM\n P htmlTable 2.4.2 2023-10-29 [?] CRAN (R 4.4.0)\n P htmltools 0.5.8.1 2024-04-04 [?] RSPM\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM\n P httpuv 1.6.15 2024-03-26 [?] RSPM\n P igraph 2.0.3 2024-03-13 [?] CRAN (R 4.4.0)\n P infer * 1.0.7 2024-03-25 [?] CRAN (R 4.4.0)\n P insight 0.20.1 2024-06-11 [?] CRAN (R 4.4.0)\n P ipred 0.9-14 2023-03-09 [?] CRAN (R 4.4.0)\n P irr * 0.84.1 2019-01-26 [?] CRAN (R 4.4.0)\n P iterators 1.0.14 2022-02-05 [?] CRAN (R 4.4.0)\n P janitor 2.2.0 2023-02-02 [?] RSPM\n P jsonlite 1.8.8 2023-12-04 [?] RSPM\n P juicyjuice 0.1.0 2022-11-10 [?] RSPM\n P knitr 1.47 2024-05-29 [?] RSPM\n P labeling 0.4.3 2023-08-29 [?] RSPM\n P later 1.3.2 2023-12-06 [?] 
RSPM\n P lattice 0.22-6 2024-03-20 [?] CRAN (R 4.4.0)\n P lava 1.8.0 2024-03-05 [?] CRAN (R 4.4.0)\n lavaan 0.6-17 2023-12-20 [2] CRAN (R 4.4.0)\n P lazyeval 0.2.2 2019-03-15 [?] CRAN (R 4.4.0)\n P lhs 1.1.6 2022-12-17 [?] CRAN (R 4.4.0)\n P lifecycle 1.0.4 2023-11-07 [?] RSPM\n P listenv 0.9.1 2024-01-29 [?] RSPM\n P lme4 * 1.1-35.3 2024-04-16 [?] CRAN (R 4.4.0)\n P lpSolve * 5.6.20 2023-12-10 [?] CRAN (R 4.4.0)\n P lubridate * 1.9.3 2023-09-27 [?] RSPM\n P magrittr 2.0.3 2022-03-30 [?] RSPM\n P ManyEcoEvo * 2.0.0 2024-07-17 [?] Github (egouldo/ManyEcoEvo@f0dd6eb)\n P markdown 1.13 2024-06-04 [?] RSPM\n P MASS 7.3-60.2 2024-04-24 [?] local\n P mathjaxr 1.6-0 2022-02-28 [?] CRAN (R 4.4.0)\n P Matrix * 1.7-0 2024-03-22 [?] CRAN (R 4.4.0)\n P memoise 2.0.1 2021-11-26 [?] RSPM\n P metadat * 1.2-0 2022-04-06 [?] CRAN (R 4.4.0)\n P metafor * 4.6-0 2024-03-28 [?] CRAN (R 4.4.0)\n P mime 0.12 2021-09-28 [?] RSPM\n P miniUI 0.1.1.1 2018-05-18 [?] RSPM\n P minqa 1.2.7 2024-05-20 [?] CRAN (R 4.4.0)\n mnormt 2.1.1 2022-09-26 [2] CRAN (R 4.4.0)\n P modelbased * 0.8.8 2024-06-11 [?] RSPM\n P modeldata * 1.3.0 2024-01-21 [?] CRAN (R 4.4.0)\n multcomp 1.4-25 2023-06-20 [2] CRAN (R 4.4.0)\n P MuMIn * 1.47.5 2023-03-22 [?] CRAN (R 4.4.0)\n P munsell 0.5.1 2024-04-01 [?] RSPM\n P mvtnorm 1.2-5 2024-05-21 [?] CRAN (R 4.4.0)\n P NatParksPalettes 0.2.0 2022-10-09 [?] CRAN (R 4.4.0)\n P nlme 3.1-164 2023-11-27 [?] CRAN (R 4.4.0)\n P nloptr 2.0.3 2022-05-26 [?] CRAN (R 4.4.0)\n P nnet 7.3-19 2023-05-03 [?] CRAN (R 4.4.0)\n nonnest2 0.5-7 2024-05-06 [2] CRAN (R 4.4.0)\n P numDeriv * 2016.8-1.1 2019-06-06 [?] CRAN (R 4.4.0)\n P orchaRd 2.0 2024-06-08 [?] Github (daniel1noble/orchaRd@15423d3)\n P parallelly 1.37.1 2024-02-29 [?] RSPM\n P parameters * 0.21.7 2024-05-14 [?] CRAN (R 4.4.0)\n P parsnip * 1.2.1 2024-03-22 [?] CRAN (R 4.4.0)\n P patchwork * 1.2.0 2024-01-08 [?] CRAN (R 4.4.0)\n pbivnorm 0.6.0 2015-01-23 [2] CRAN (R 4.4.0)\n P pbkrtest 0.5.2 2023-01-19 [?] 
CRAN (R 4.4.0)\n P performance 0.12.0 2024-06-08 [?] CRAN (R 4.4.0)\n P pillar 1.9.0 2023-03-22 [?] RSPM\n P pkgbuild 1.4.4 2024-03-17 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM\n P pkgload 1.3.4 2024-01-16 [?] RSPM\n P pointblank 0.12.1 2024-03-25 [?] RSPM\n pracma 2.4.4 2023-11-10 [2] CRAN (R 4.4.0)\n P prodlim 2023.08.28 2023-08-28 [?] CRAN (R 4.4.0)\n P profvis 0.3.8 2023-05-02 [?] RSPM\n P promises 1.3.0 2024-04-05 [?] RSPM\n P purrr * 1.0.2 2023-08-10 [?] RSPM\n quadprog 1.5-8 2019-11-20 [2] CRAN (R 4.4.0)\n P R6 2.5.1 2021-08-19 [?] RSPM\n P Rcpp 1.0.12 2024-01-09 [?] RSPM\n P readr * 2.1.5 2024-01-10 [?] RSPM\n P recipes * 1.0.10 2024-02-18 [?] CRAN (R 4.4.0)\n P remotes 2.5.0 2024-03-17 [?] RSPM\n renv 1.0.7 2024-04-11 [1] CRAN (R 4.4.0)\n P rlang 1.1.4 2024-06-04 [?] RSPM\n P rmarkdown 2.27 2024-05-17 [?] RSPM\n P rpart 4.1.23 2023-12-05 [?] CRAN (R 4.4.0)\n P rprojroot 2.0.4 2023-11-05 [?] RSPM\n P rsample * 1.2.1 2024-03-25 [?] CRAN (R 4.4.0)\n P rstatix 0.7.2 2023-02-01 [?] CRAN (R 4.4.0)\n P rstudioapi 0.16.0 2024-03-24 [?] RSPM\n P sae 1.3 2020-03-01 [?] CRAN (R 4.4.0)\n sandwich 3.1-0 2023-12-11 [2] CRAN (R 4.4.0)\n P sass 0.4.9 2024-03-15 [?] RSPM\n P scales * 1.3.0 2023-11-28 [?] RSPM\n P see 0.8.4 2024-04-29 [?] CRAN (R 4.4.0)\n P sessioninfo 1.2.2 2021-12-06 [?] RSPM\n P shiny 1.8.1.1 2024-04-02 [?] RSPM\n sjlabelled 1.2.0 2022-04-10 [2] CRAN (R 4.4.0)\n P snakecase 0.11.1 2023-08-27 [?] RSPM\n P specr * 1.0.0 2023-01-20 [?] CRAN (R 4.4.0)\n P stringi 1.8.4 2024-05-06 [?] RSPM\n P stringr * 1.5.1 2023-11-14 [?] RSPM\n P survival 3.5-8 2024-02-14 [?] CRAN (R 4.4.0)\n TH.data 1.1-2 2023-04-17 [2] CRAN (R 4.4.0)\n P tibble * 3.2.1 2023-03-20 [?] RSPM\n P tidymodels * 1.2.0 2024-03-25 [?] CRAN (R 4.4.0)\n P tidyr * 1.3.1 2024-01-24 [?] RSPM\n P tidyselect 1.2.1 2024-03-11 [?] RSPM\n P tidyverse * 2.0.0 2023-02-22 [?] RSPM\n P timechange 0.3.0 2024-01-18 [?] RSPM\n P timeDate 4032.109 2023-12-14 [?] 
CRAN (R 4.4.0)\n P tune * 1.2.1 2024-04-18 [?] CRAN (R 4.4.0)\n P tzdb 0.4.0 2023-05-12 [?] RSPM\n P urlchecker 1.0.1 2021-11-30 [?] RSPM\n P usethis 2.2.3 2024-02-19 [?] RSPM\n P utf8 1.2.4 2023-10-22 [?] RSPM\n P V8 4.4.2 2024-02-15 [?] RSPM\n P vctrs 0.6.5 2023-12-01 [?] RSPM\n P vipor 0.4.7 2023-12-18 [?] CRAN (R 4.4.0)\n P withr 3.0.0 2024-01-16 [?] RSPM\n P workflows * 1.1.4 2024-02-19 [?] CRAN (R 4.4.0)\n P workflowsets * 1.1.0 2024-03-21 [?] CRAN (R 4.4.0)\n P xfun 0.44 2024-05-15 [?] RSPM\n P xml2 1.3.6 2023-12-04 [?] RSPM\n P xtable 1.8-4 2019-04-21 [?] RSPM\n P yaml 2.3.8 2023-12-11 [?] RSPM\n P yardstick * 1.3.1 2024-03-21 [?] CRAN (R 4.4.0)\n zoo 1.8-12 2023-04-13 [2] CRAN (R 4.4.0)\n\n [1] /Users/elliotgould/Documents/GitHub/ManyAnalysts/renv/library/macos/R-4.4/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.4-arm64/Resources/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n", + "markdown": "---\ntitle: \"Same data, different analysts: variation in effect sizes due to analytical decisions in ecology and evolutionary biology.\"\nabstract: |\n Although variation in effect sizes and predicted values among studies of similar phenomena is inevitable, such variation far exceeds what might be produced by sampling error alone. One possible explanation for variation among results is differences among researchers in the decisions they make regarding statistical analyses. A growing array of studies has explored this analytical variability in different fields and has found substantial variability among results despite analysts having the same data and research question. Many of these studies have been in the social sciences, but one small ‘many analyst’ study found similar variability in ecology. 
We expanded the scope of this prior work by implementing a large-scale empirical exploration of the variation in effect sizes and model predictions generated by the analytical decisions of different researchers in ecology and evolutionary biology. We used two unpublished datasets, one from evolutionary ecology (blue tit, *Cyanistes caeruleus*, to compare sibling number and nestling growth) and one from conservation ecology (*Eucalyptus*, to compare grass cover and tree seedling recruitment), and the project leaders recruited 174 analyst teams, comprising 246 analysts, to investigate the answers to prespecified research questions. Analyses conducted by these teams yielded 141 usable effects (compatible with our meta-analyses and all necessary information provided) for the blue tit dataset, and 85 usable effects for the *Eucalyptus* dataset. We found substantial heterogeneity among results for both datasets, although the patterns of variation differed between them. For the blue tit analyses, the average effect was convincingly negative, with less growth for nestlings living with more siblings, but there was near continuous variation in effect size from large negative effects to effects near zero, and even effects crossing the traditional threshold of statistical significance in the opposite direction. In contrast, the average relationship between grass cover and *Eucalyptus* seedling number was only slightly negative and not convincingly different from zero, and most effects ranged from weakly negative to weakly positive, with about a third of effects crossing the traditional threshold of significance in one direction or the other. However, there were also several striking outliers in the *Eucalyptus* dataset, with effects far from zero. 
For both datasets, we found substantial variation in the variable selection and random effects structures among analyses, as well as in the ratings of the analytical methods by peer reviewers, but we found no strong relationship between any of these and deviation from the meta-analytic mean. In other words, analyses with results that were far from the mean were no more or less likely to have dissimilar variable sets, use random effects in their models, or receive poor peer reviews than those analyses that found results that were close to the mean. The existence of substantial variability among analysis outcomes raises important questions about how ecologists and evolutionary biologists should interpret published results, and how they should conduct analyses in the future. \nauthors:\n - name: \"Elliot Gould\"\n orcid: \"0000-0002-6585-538X\"\n affiliation:\n - name: The University of Melbourne\n department: School of Agriculture Food and Ecosystem Sciences\n roles:\n - Software\n - Investigation\n - Manuscript Writing\n - name: \"Hannah S. Fraser\"\n orcid: \"0000-0003-2443-4463\"\n affiliation:\n - name: The University of Melbourne\n department: School of Historical and Philosophical Studies\n roles:\n - Software\n - Investigation\n - Manuscript Writing\n - name: \"Timothy H. Parker\"\n email: \"parkerth@whitman.edu\"\n orcid: \"0000-0003-2995-5284\"\n attributes:\n corresponding: true\n roles:\n - Investigation\n - Manuscript Writing\n affiliation:\n - name: Whitman College\n department: Department of Biology\n - name: \"Shinichi Nakagawa\"\n orcid: \"0000-0002-7765-5182\"\n affiliation:\n - name: The University of New South Wales\n department: School of Biological, Earth & Environmental Sciences\n roles:\n - Software\n - Investigation\n - Manuscript Writing\n - name: \"Simon C. Griffith\"\n orcid: \"0000-0001-7612-4999\"\n affiliation:\n - name: Macquarie University\n department: School of Natural Sciences\n roles:\n - Manuscript Writing\n - name: \"Peter A. 
Vesk\"\n orcid: \"0000-0003-2008-7062\"\n affiliation:\n - name: The University of Melbourne\n department: School of Biological, Earth & Environmental Sciences\n roles:\n - Manuscript Writing\n - name: \"Fiona Fidler\"\n orcid: \"0000-0002-2700-2562\"\n affiliation:\n - name: The University of Melbourne\n department: School of Historical and Philosophical Studies\n roles:\n - Manuscript Writing\n - name: \"Daniel G. Hamilton\"\n orcid: \"0000-0001-8104-474X\"\n affiliation:\n - name: \"The University of Melbourne\"\n department: \"School of BioSciences\"\n - name: \"Robin N Abbey-Lee\" \n affiliation: \n - name: \"Länsstyrelsen Östergötland\" \n department: \"\"\n - name: \"Jessica K. Abbott\" \n orcid: \"0000-0002-8743-2089\" \n affiliation: \n - name: \"Lund University\" \n department: \"Biology Department\"\n - name: \"Luis A. Aguirre\" \n orcid: \"0000-0001-9796-9755\" \n affiliation: \n - name: \"University of Massachusetts\" \n department: \"Department of Biology\"\n - name: \"Carles Alcaraz\" \n orcid: \"0000-0002-2147-4796\" \n affiliation: \n - name: \"IRTA\" \n department: \"Marine and Continental Waters\"\n - name: \"Irith Aloni\"\n orcid: \"0000-0002-7777-3365\"\n affiliation:\n - name: \"Ben Gurion University of the Negev\"\n department: \"Dept. of Life Sciences\"\n - name: \"Drew Altschul\" \n orcid: \"0000-0001-7053-4209\" \n affiliation: \n - name: \"The University of Edinburgh\" \n department: \"Department of Psychology\"\n - name: \"Kunal Arekar\" \n orcid: \"0000-0003-1060-5911\" \n affiliation: \n - name: \"Indian Institute of Science\" \n department: \"Centre for Ecological Sciences\"\n - name: \"Jeff W. 
Atkins\" \n orcid: \"0000-0002-2295-3131\" \n affiliation: \n - name: \"USDA Forest Service\"\n department: \"Southern Research Station\"\n - name: \"Joe Atkinson\" \n orcid: \"0000-0001-9232-4421\" \n affiliation: \n - name: \"Aarhus University\" \n department: \"Center for Ecological Dynamics in a Novel Biosphere (ECONOVO), Department of Biology\"\n - name: \"Christopher M. Baker\"\n orcid: \"0000-0001-9449-3632\"\n affiliation:\n - name: \"The University of Melbourne\"\n department: \"School of Mathematics and Statistics\"\n - name: \"Meghan Barrett\" \n affiliation: \n - name: \"Indiana University Purdue University Indianapolis\" \n department: \"Biology\"\n - name: \"Kristian Bell\"\n orcid: \"0000-0002-1857-6257\"\n affiliation: \n - name: \"Deakin University\"\n department: \"School of Life and Environmental Sciences\"\n - name: \"Suleiman Kehinde Bello\" \n orcid: \"0000-0001-6718-9256\" \n affiliation: \n - name: \"King Abdulaziz University\" \n department: \"Department of Arid Land Agriculture\"\n - name: \"Iván Beltrán\"\n orcid: \"0000-0003-4439-8391\"\n affiliation:\n - name: \"Macquarie University\"\n department: \"Department of Biological Sciences\"\n - name: \"Bernd J. Berauer\" \n orcid: \"0000-0002-9472-1532\" \n affiliation: \n - name: \"University of Hohenheim, Institute of Landscape and Plant Ecology\" \n department: \"Department of Plant Ecology\"\n - name: \"Michael Grant Bertram\" \n orcid: \"0000-0001-5320-8444\" \n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Wildlife, Fish, and Environmental Studies\"\n - name: \"Peter D. 
Billman\" \n orcid: \"0000-0002-4072-4965\" \n affiliation: \n - name: \"University of Connecticut\" \n department: \"Department of Ecology and Evolutionary Biology\"\n - name: \"Charlie K Blake\" \n orcid: \"0000-0003-4649-3472\" \n affiliation: \n - name: \"Southern Illinois University Edwardsville\" \n department: \"STEM Center\"\n - name: \"Shannon Blake\" \n affiliation: \n - name: \"University of Guelph\" \n department: \"\"\n - name: \"Louis Bliard\" \n orcid: \"0000-0002-2349-8513\" \n affiliation: \n - name: \"University of Zurich\" \n department: \"Department of Evolutionary Biology and Environmental Studies\"\n - name: \"Andrea Bonisoli-Alquati\" \n orcid: \"0000-0002-9255-7556\" \n affiliation: \n - name: \"California State Polytechnic University, Pomona\" \n department: \"Department of Biological Sciences\"\n - name: \"Timothée Bonnet\" \n orcid: \"0000-0001-7186-5288\" \n affiliation: \n - name: \"UMR 7372 Université de la Rochelle - Centre National de la Recherche Scientifique\" \n department: \"Centre d'Études Biologiques de Chizé\"\n - name: \"Camille Nina Marion Bordes\" \n orcid: \"0000-0002-3561-2811\" \n affiliation: \n - name: \"Bar Ilan University\" \n department: \"Faculty of Life Sciences\"\n - name: \"Aneesh P. H. Bose\" \n orcid: \"0000-0001-5716-0097\" \n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Wildlife, Fish, and Environmental Studies\"\n - name: \"Thomas Botterill-James\" \n orcid: \"0000-0002-6186-5871\" \n affiliation: \n - name: \"University of Tasmania\" \n department: \"School of Natural Sciences\"\n - name: \"Melissa Anna Boyd\" \n orcid: \"0000-0003-2681-8567\" \n affiliation: \n - name: \"Whitebark Institute\" \n department: \"\"\n - name: \"Sarah A. 
Boyle\" \n orcid: \"0000-0001-9498-6787\" \n affiliation: \n - name: \"Rhodes College\" \n department: \"Department of Biology\"\n - name: \"Tom Bradfer-Lawrence\" \n orcid: \"0000-0001-6045-4360\" \n affiliation: \n - name: \"RSPB\" \n department: \"Centre for Conservation Science\"\n - name: \"Jennifer Bradham\"\n orcid: \"\"\n affiliation:\n - name: \"Wofford College\"\n department: \"Environmental Studies\"\n - name: \"Jack A Brand\" \n orcid: \"0000-0003-3312-941X\" \n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Wildlife, Fish and Environmental Studies\"\n - name: \"Martin I. Brengdahl\" \n orcid: \"0000-0002-1052-7274\" \n affiliation: \n - name: \"Linköping University\" \n department: \"IFM Biology\"\n - name: \"Martin Bulla\" \n orcid: \"0000-0002-1328-1927\" \n affiliation: \n - name: \"Czech University of Life Sciences Prague\" \n department: \"Faculty of Environmental Sciences\"\n - name: \"Luc Bussière\" \n orcid: \"0000-0001-8937-8381\" \n affiliation: \n - name: \"University of Gothenburg\" \n department: \"Biological and Environmental Sciences & Gothenburg Global Biodiversity Centre\"\n - name: \"Ettore Camerlenghi\" \n orcid: \"0000-0002-6203-069X\" \n affiliation: \n - name: \"Monash University\" \n department: \"School of Biological Sciences\"\n - name: \"Sara E. 
Campbell\" \n orcid: \"0000-0001-7195-8898\" \n affiliation: \n - name: \"University of Tennessee Knoxville\" \n department: \"Ecology and Evolutionary Biology\"\n - name: \"Leonardo L F Campos\" \n orcid: \"0000-0002-0186-8552\" \n affiliation: \n - name: \"Universidade Federal de Santa Catarina\" \n department: \"Departamento de Ecologia e Zoologia\"\n - name: \"Anthony Caravaggi\" \n orcid: \"0000-0002-1763-8970\" \n affiliation: \n - name: \"University of South Wales\" \n department: \"School of Biological and Forensic Sciences\"\n - name: \"Pedro Cardoso\" \n orcid: \"0000-0001-8119-9960\" \n affiliation: \n - name: \"Faculdade de Ciências, Universidade de Lisboa\" \n department: \"Centre for Ecology, Evolution and Environmental Changes (cE3c) & CHANGE - Global Change and Sustainability Institute\"\n - name: \"Charles J.W. Carroll\" \n affiliation: \n - name: \"Colorado State University\" \n department: \"Forest and Rangeland Stewardship\"\n - name: \"Therese A. Catanach\" \n orcid: \"0000-0003-3850-1196\" \n affiliation: \n - name: \"Academy of Natural Sciences of Drexel University\" \n department: \"Department of Ornithology\"\n - name: \"Xuan Chen\" \n orcid: \"0000-0002-9499-0054\" \n affiliation: \n - name: \"Salisbury University\" \n department: \"Department of Biological Sciences\"\n - name: \"Heung Ying Janet Chik\" \n orcid: \"0000-0003-4646-4444\" \n affiliation: \n - name: \"University of Groningen\" \n department: \"Groningen Institute for Evolutionary Life Sciences\"\n - name: \"Emily Sarah Choy\" \n orcid: \" 0000-0002-4703-4318\" \n affiliation: \n - name: \"McMaster University\" \n department: \"Department of Biology\"\n - name: \"Alec Philip Christie\" \n orcid: \"0000-0002-8465-8410\" \n affiliation: \n - name: \"University of Cambridge \" \n department: \"Department of Zoology\"\n - name: \"Angela Chuang\" \n orcid: \"0000-0001-6847-5115\" \n affiliation: \n - name: \"University of Florida\" \n department: \"Entomology and Nematology\"\n - 
name: \"Amanda J. Chunco\" \n orcid: \"0000-0002-8265-327X\" \n affiliation: \n - name: \"Elon University\" \n department: \"Environmental Studies\"\n - name: \"Bethany L Clark\" \n orcid: \"0000-0001-5803-7744\" \n affiliation: \n - name: \"BirdLife International\" \n department: \"\"\n - name: \"Andrea Contina\"\n orcid: \"0000-0002-0484-6711\"\n affiliation:\n - name: \"The University of Texas Rio Grande Valley\"\n department: \"School of Integrative Biological and Chemical Sciences\"\n - name: \"Garth A Covernton\"\n orcid: \"0000-0003-3814-4918\"\n affiliation:\n - name: \"University of Toronto\"\n department: \"Department of Ecology and Evolutionary Biology\"\n - name: \"Murray P. Cox\" \n orcid: \"0000-0003-1936-0236\" \n affiliation: \n - name: \"University of Auckland\" \n department: \"Department of Statistics\"\n - name: \"Kimberly A. Cressman\" \n affiliation: \n - name: \"Catbird Stats, LLC\" \n department: \"\"\n - name: \"Marco Crotti\"\n orcid: \"0000-0002-8619-7988\"\n affiliation:\n - name: \"University of Glasgow\"\n department: \"School of Biodiversity, One Health & Veterinary Medicine\"\n - name: \"Connor Davidson Crouch\" \n orcid: \"0000-0003-0353-5820\" \n affiliation: \n - name: \"Northern Arizona University\" \n department: \"School of Forestry\"\n - name: \"Pietro B. 
D'Amelio\" \n orcid: \"0000-0002-4095-6088\" \n affiliation: \n - name: \"Max Planck Institute for Biological Intelligence\" \n department: \"Department of Behavioural Neurobiology\"\n - name: \"Alexandra Allison de Sousa\" \n orcid: \"0000-0003-2379-3894\" \n affiliation: \n - name: \"Bath Spa University\" \n department: \"School of Sciences: Center for Health and Cognition\"\n - name: \"Timm Fabian Döbert\" \n orcid: \"0000-0002-1601-8665\" \n affiliation: \n - name: \"University of Alberta\" \n department: \"Department of Biological Sciences\"\n - name: \"Ralph Dobler\" \n affiliation: \n - name: \"TU Dresden\" \n department: \"Applied Zoology\"\n - name: \"Adam J Dobson\" \n orcid: \"0000-0003-1541-927X\" \n affiliation: \n - name: \"University of Glasgow\" \n department: \"School of Molecular Biosciences, College of Medical Veterinary & Life Sciences,\"\n - name: \"Tim S. Doherty\" \n affiliation: \n - name: \"The University of Sydney\" \n department: \"School of Life and Environmental Sciences\"\n - name: \"Szymon Marian Drobniak\" \n orcid: \"0000-0001-8101-6247\" \n affiliation: \n - name: \"Jagiellonian University\" \n department: \"Institute of Environmental Sciences\"\n - name: \"Alexandra Grace Duffy\" \n orcid: \"0000-0002-7069-5384\" \n affiliation: \n - name: \"Brigham Young University\" \n department: \"Biology Department\"\n - name: \"Alison B. Duncan\"\n orcid: \"0000-0002-6499-2913\"\n affiliation:\n - name: \"University of Montpellier, CNRS, IRD.\"\n department: \"Institute of Evolutionary Sciences Montpellier,\"\n - name: \"Robert P. 
Dunn\" \n orcid: \"0000-0002-6356-4458\" \n affiliation: \n - name: \"University of South Carolina\" \n department: \"Baruch Marine Field Laboratory\"\n - name: \"Jamie Dunning\" \n affiliation: \n - name: \"Imperial College London\" \n department: \"Department of Life Sciences\"\n - name: \"Trishna Dutta\"\n orcid: \"0000-0002-5236-2658\"\n affiliation:\n - name: \"European Forest Institute\"\n department: \"\"\n - name: \"Luke Eberhart-Hertel\" \n orcid: \"0000-0001-7311-6088\" \n affiliation: \n - name: \"Max Planck Institute for Biological Intelligence\" \n department: \"Department of Ornithology\"\n - name: \"Jared Alan Elmore\" \n orcid: \"0000-0002-5980-1561\" \n affiliation: \n - name: \"Clemson University\" \n department: \"Forestry and Environmental Conservation, National Bobwhite and Grassland Initiative\"\n - name: \"Mahmoud Medhat Elsherif\" \n orcid: \"0000-0002-0540-3998\" \n affiliation: \n - name: \"University of Birmingham, Baily Thomas Grant\" \n department: \"Department of Psychology and Vision Science\"\n - name: \"Holly M English\" \n orcid: \"0000-0002-8854-6707\" \n affiliation: \n - name: \"University College Dublin\" \n department: \"School of Biology and Environmental Science\"\n - name: \"David C. Ensminger\" \n orcid: \"0000-0001-5554-1638\" \n affiliation: \n - name: \"San José State University\" \n department: \"Department of Biological Sciences\"\n - name: \"Ulrich Rainer Ernst\" \n orcid: \"0000-0002-6330-5341\" \n affiliation: \n - name: \"University of Hohenheim\" \n department: \"Apicultural State Institute\"\n - name: \"Stephen M. Ferguson\" \n orcid: \"0000-0003-1577-2727\" \n affiliation: \n - name: \"St. 
Norbert College\" \n department: \"Department of Biology\"\n - name: \"Esteban Fernandez-Juricic\"\n orcid: \"0000-0001-5290-8078\"\n affiliation:\n - name: \"Purdue University\"\n department: \"Department of Biological Sciences\"\n - name: \"Thalita Ferreira-Arruda Ferreira-Arruda\" \n orcid: \"0000-0003-1385-0226\" \n affiliation: \n - name: \"University of Göttingen\"\n department: \"Biodiversity, Macroecology & Biogeography, Faculty of Forest Sciences and Forest Ecology\"\n - name: \"John Fieberg\" \n orcid: \"0000-0002-3180-7021\" \n affiliation: \n - name: \"University of Minnesota\" \n department: \"Department of Fisheries, Wildlife, and Conservation Biology\"\n - name: \"Elizabeth A Finch\" \n orcid: \"0000-0002-7031-5708\" \n affiliation: \n - name: \"CABI\" \n department: \"\"\n - name: \"Evan A. Fiorenza\" \n orcid: \"0000-0002-5421-0148\" \n affiliation: \n - name: \"University of California, Irvine\" \n department: \"Department of Ecology and Evolutionary Biology, School of Biological Sciences\"\n - name: \"David N Fisher\" \n orcid: \"0000-0002-4444-4450\" \n affiliation: \n - name: \"University of Aberdeen\" \n department: \"School of Biological Sciences\"\n - name: \"Amélie Fontaine\"\n orcid: \"\"\n affiliation:\n - name: \"McGill University\"\n department: \"Department of Natural Resource Sciences\"\n - name: \"Wolfgang Forstmeier\" \n orcid: \"0000-0002-5984-8925\" \n affiliation: \n - name: \"Max Planck Institute for Biological Intelligence\" \n department: \"Department of Ornithology\"\n - name: \"Yoan Fourcade\" \n orcid: \"0000-0003-3820-946X\" \n affiliation: \n - name: \"Univ. Paris-Est Creteil\" \n department: \"Institute of Ecology and Environmental Sciences (iEES)\"\n - name: \"Graham S. Frank\" \n orcid: \"0000-0002-0151-3807\" \n affiliation: \n - name: \"Oregon State University\" \n department: \"Department of Forest Ecosystems and Society\"\n - name: \"Cathryn A. 
Freund\" \n orcid: \"0000-0002-1570-5519\" \n affiliation: \n - name: \"Wake Forest University\" \n department: \"\"\n - name: \"Eduardo Fuentes-Lillo\"\n orcid: \"0000-0001-5657-954X\"\n affiliation:\n - name: \"Instituto de Ecología y Biodiversidad\"\n department: \"Laboratorio de Invasiones Biológicas (LIB)\"\n - name: \"Sara L. Gandy\" \n orcid: \"0000-0003-2579-4479\" \n affiliation: \n - name: \"University of Glasgow\" \n department: \"Institute for Biodiversity, Animal Health and Comparative Medicine\"\n - name: \"Dustin G. Gannon\" \n orcid: \"0000-0002-6936-8626\" \n affiliation: \n - name: \"Oregon State University\" \n department: \"Department of Forest Ecosystems and Society, College of Forestry\"\n - name: \"Ana I. García-Cervigón\" \n orcid: \"0000-0001-6651-2445\" \n affiliation: \n - name: \"Rey Juan Carlos University\" \n department: \"Biodiversity and Conservation Area\"\n - name: \"Alexis C. Garretson\"\n orcid: \"0000-0002-7260-0131\"\n affiliation:\n - name: \"Tufts University\"\n department: \"Graduate School of Biomedical Sciences\"\n - name: \"Xuezhen Ge\"\n orcid: \"0000-0002-5527-6720\"\n affiliation:\n - name: \"University of Guelph\"\n department: \"Department of Integrative Biology\"\n - name: \"William L. 
Geary\"\n orcid: \"0000-0002-6520-689X\"\n affiliation:\n - name: \"Deakin University\"\n department: \"School of Life and Environmental Sciences (Burwood Campus)\"\n - name: \"Charly Géron\" \n orcid: \"0000-0001-7912-4708\" \n affiliation: \n - name: \"University of Rennes\" \n department: \"CNRS\"\n - name: \"Marc Gilles\" \n orcid: \"0000-0003-4222-9754\" \n affiliation: \n - name: \"Bielefeld University\" \n department: \"Department of Behavioural Ecology\"\n - name: \"Antje Girndt\" \n orcid: \"0000-0002-9558-1201\" \n affiliation: \n - name: \"Universität Bielefeld\" \n department: \"Fakultät für Biologie, Arbeitsgruppe Evolutionsbiologie\"\n - name: \"Daniel Gliksman\" \n affiliation: \n - name: \"Technische Universität Dresden\" \n department: \"Chair of Meteorology, Institute for Hydrology and Meteorology, Faculty of Environmental Sciences\"\n - name: \"Harrison B Goldspiel\" \n orcid: \"0000-0001-9193-8165\" \n affiliation: \n - name: \"University of Maine\" \n department: \"Department of Wildlife, Fisheries, and Conservation Biology\"\n - name: \"Dylan G. E. Gomes\" \n orcid: \"0000-0002-2642-3728\" \n affiliation: \n - name: \"Boise State University\" \n department: \"Department of Biological Sciences\"\n - name: \"Megan Kate Good\"\n orcid: \"0000-0002-6908-1633\"\t \n affiliation:\t \n - name: \"The University of Melbourne\"\t \n department: \"School of Agriculture, Food and Ecosystem Sciences\"\n - name: \"Sarah C Goslee\" \n orcid: \"0000-0002-5939-3297\" \n affiliation: \n - name: \"USDA Agricultural Research Service\" \n department: \"Pastures Systems and Watershed Management Research Unit\"\n - name: \"J. Stephen Gosnell\" \n orcid: \"0000-0002-2103-2728\" \n affiliation: \n - name: \"Baruch College, City University of New York\" \n department: \"Department of Natural Sciences\"\n - name: \"Eliza M. 
Grames\"\n orcid: \"0000-0003-1743-6815\"\n affiliation:\n - name: \"Binghamton University\"\n department: \"Department of Biological Sciences\"\n - name: \"Paolo Gratton\" \n orcid: \"0000-0001-8464-4062\" \n affiliation: \n - name: \"Università di Roma 'Tor Vergata'\"\n department: \"Dipartimento di Biologia\"\n - name: \"Nicholas M. Grebe\" \n orcid: \"0000-0003-1411-065X\" \n affiliation: \n - name: \"University of Michigan\" \n department: \"Department of Anthropology\"\n - name: \"Skye M. Greenler\" \n orcid: \"0000-0002-4454-8970\" \n affiliation: \n - name: \"Oregon State University\" \n department: \"College of Forestry\"\n - name: \"Maaike Griffioen\"\n orcid: \"0000-0002-9311-8811\"\n affiliation:\n - name: \"University of Antwerp\"\n department: \"\"\n - name: \"Daniel M Griffith\" \n orcid: \"0000-0001-7463-4004\" \n affiliation: \n - name: \"Wesleyan University\" \n department: \"Earth & Environmental Sciences\"\n - name: \"Frances J. Griffith\" \n orcid: \"0000-0001-9238-0212\" \n affiliation: \n - name: \"Yale University\" \n department: \"Yale School of Medicine, Department of Psychiatry\"\n - name: \"Jake J. Grossman\" \n orcid: \"0000-0001-6468-8551\" \n affiliation: \n - name: \"St. Olaf College\" \n department: \"Biology Department and Environmental Studies Department\"\n - name: \"Ali Güncan\" \n orcid: \"0000-0003-1765-648X\" \n affiliation: \n - name: \"Ordu University\" \n department: \"Department of Plant Protection, Faculty of Agriculture\"\n - name: \"Stef Haesen\" \n orcid: \"0000-0002-4491-4213\" \n affiliation: \n - name: \"KU Leuven\" \n department: \"Department of Earth and Environmental Sciences\"\n - name: \"James G. Hagan\" \n orcid: \"0000-0002-7504-3393\" \n affiliation: \n - name: \"University of Gothenburg\" \n department: \"Department of Marine Sciences\"\n - name: \"Heather A. 
Hager\"\n orcid: \"0000-0002-0066-6844\"\n affiliation:\n - name: \"Wilfrid Laurier University\"\n department: \"Department of Biology\"\n - name: \"Jonathan Philo Harris\"\n orcid: \"0000-0003-4428-903X\"\n affiliation:\n - name: \"Iowa State University\"\n department: \"Natural Resource Ecology and Management\"\n - name: \"Natasha Dean Harrison\" \n orcid: \"0000-0001-5779-0187\" \n affiliation: \n - name: \"University of Western Australia\" \n department: \"School of Biological Sciences\"\n - name: \"Sarah Syedia Hasnain\" \n orcid: \"0000-0003-4358-5478\" \n affiliation: \n - name: \"Middle East Technical University\" \n department: \"Department of Biological Sciences\"\n - name: \"Justin Chase Havird\" \n orcid: \"0000-0002-8692-6503\" \n affiliation: \n - name: \"University of Texas at Austin\" \n department: \"Dept. of Integrative Biology\"\n - name: \"Andrew J. Heaton\" \n orcid: \"0000-0002-1916-9979\" \n affiliation: \n - name: \"Grand Bay National Estuarine Research Reserve\" \n department: \"\"\n - name: \"María Laura Herrera-Chaustre\"\n orcid: \"0009-0006-2890-5583\"\n affiliation:\n - name: \"Universidad de los Andes\"\n department: \"\"\n - name: \"Tanner J. Howard\"\n orcid: \"0000-0001-7772-1613\"\n affiliation:\t\n - name: \"\"\n department: \"\"\n - name: \"Bin-Yan Hsu\" \n orcid: \"0000-0002-3799-0509\" \n affiliation: \n - name: \"University of Turku\" \n department: \"Department of Biology\"\n - name: \"Fabiola Iannarilli\"\n orcid: \"0000-0002-7018-3557\"\n affiliation:\n - name: \"University of Minnesota\"\n department: \"Dept of Fisheries, Wildlife and Conservation Biology\"\n - name: \"Esperanza C. Iranzo\" \n orcid: \"0000-0001-9411-8437\" \n affiliation: \n - name: \"Universidad Austral de Chile\" \n department: \"Instituto de Ciencia Animal. Facultad de Ciencias Veterinarias\"\n - name: \"Erik N. K. 
Iverson\" \n orcid: \"0000-0002-3756-9511\" \n affiliation: \n - name: \"The University of Texas at Austin\" \n department: \"Department of Integrative Biology\"\n - name: \"Saheed Olaide Jimoh\" \n orcid: \"0000-0002-3238-9079 \" \n affiliation: \n - name: \"University of Wyoming\" \n department: \"Department of Botany\"\n - name: \"Douglas H. Johnson\" \n orcid: \"0000-0002-7778-6641\" \n affiliation: \n - name: \"University of Minnesota\" \n department: \"Department of Fisheries, Wildlife, and Conservation Biology, University of Minnesota\"\n - name: \"Martin Johnsson\" \n orcid: \"0000-0003-1262-4585\" \n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Animal Breeding and Genetics\"\n - name: \"Jesse Jorna\" \n affiliation: \n - name: \"Brigham Young University\" \n department: \"Department of Biology\"\n - name: \"Tommaso Jucker\" \n orcid: \"0000-0002-0751-6312\" \n affiliation: \n - name: \"University of Bristol\" \n department: \"School of Biological Sciences\"\n - name: \"Martin Jung\" \n orcid: \"0000-0002-7569-1390\" \n affiliation: \n - name: \"International Institute for Applied Systems Analysis (IIASA)\" \n department: \"\"\n - name: \"Ineta Kačergytė\" \n orcid: \"0000-0003-4756-8253\"\n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Ecology\"\n - name: \"Oliver Kaltz\"\n orcid: \"\"\n affiliation:\n - name: \"Université de Montpellier\"\n department: \"\"\n - name: \"Alison Ke\" \n orcid: \"0000-0001-9111-449X\" \n affiliation: \n - name: \"University of California, Davis\" \n department: \"Department of Wildlife, Fish, and Conservation Biology\"\n - name: \"Clint D. 
Kelly\" \n orcid: \"0000-0002-0693-7211\" \n affiliation: \n - name: \"Université du Québec à Montréal\" \n department: \"Département des Sciences biologiques\"\n - name: \"Katharine Keogan\" \n orcid: \"0000-0002-1801-7412\" \n affiliation: \n - name: \"University of Edinburgh\" \n department: \"Institute of Evolutionary Biology\"\n - name: \"Friedrich Wolfgang Keppeler\" \n orcid: \"0000-0002-5165-1298\" \n affiliation: \n - name: \"Center for Limnology, University of Wisconsin - Madison\" \n department: \"Center for Limnology\"\n - name: \"Alexander K. Killion\" \n orcid: \"0000-0003-1449-8295\" \n affiliation: \n - name: \"Yale University\" \n department: \"Center for Biodiversity and Global Change\"\n - name: \"Dongmin Kim\" \n orcid: \"0000-0002-1508-1590\" \n affiliation: \n - name: \"University of Minnesota, St. Paul\" \n department: \"Department of Ecology, Evolution, and Behavior\"\n - name: \"David P Kochan\" \n orcid: \"0000-0002-3643-3516\" \n affiliation: \n - name: \"Florida International University\" \n department: \"Institute of Environment and Department of Biological Sciences\"\n - name: \"Peter Korsten\" \n orcid: \"0000-0003-0814-9099\" \n affiliation: \n - name: \"Aberystwyth University\" \n department: \"Department of Life Sciences\"\n - name: \"Shan Kothari\" \n orcid: \"0000-0001-9445-5548\" \n affiliation: \n - name: \"Université de Montréal\" \n department: \"Institut de recherche en biologie végétale\"\n - name: \"Jonas Kuppler\" \n orcid: \"0000-0003-4409-9367\" \n affiliation: \n - name: \"Ulm University\" \n department: \"Institute of Evolutionary Ecology and Conservation Genomics\"\n - name: \"Jillian M Kusch\" \n orcid: \"0000-0003-0078-5621\" \n affiliation: \n - name: \"Memorial University of Newfoundland\" \n department: \"Department of Biology\"\n - name: \"Malgorzata Lagisz\" \n orcid: \"0000-0002-3993-6127\" \n affiliation: \n - name: \"University of New South Wales\" \n department: \"Evolution & Ecology Research Centre and 
School of Biological, Earth & Environmental Sciences\"\n - name: \"Kristen Marianne Lalla\"\n orcid: \"0000-0003-1422-0672\"\n affiliation:\n - name: \"McGill University\"\n department: \"Department of Natural Resource Sciences\"\n - name: \"Daniel J. Larkin\" \n orcid: \"0000-0001-6378-0495\" \n affiliation: \n - name: \"University of Minnesota-Twin Cities\"\n department: \"Department of Fisheries, Wildlife and Conservation Biology\"\n - name: \"Courtney L. Larson\" \n orcid: \"0000-0003-3878-259X\" \n affiliation: \n - name: \"The Nature Conservancy\" \n department: \"\"\n - name: \"Katherine S. Lauck\" \n orcid: \"0000-0003-3303-5050\" \n affiliation: \n - name: \"University of California, Davis\" \n department: \"Department of Wildlife, Fish, and Conservation Biology\"\n - name: \"M. Elise Lauterbur\" \n orcid: \"0000-0002-7362-3618\" \n affiliation: \n - name: \"University of Arizona\" \n department: \"Ecology and Evolutionary Biology\"\n - name: \"Alan Law\" \n orcid: \"0000-0001-5971-3214\" \n affiliation: \n - name: \"University of Stirling\" \n department: \"Biological and Environmental Sciences\"\n - name: \"Don-Jean Léandri-Breton\"\n orcid: \"0000-0003-0547-2966\" \n affiliation: \n - name: \"McGill University\"\n department: \"Department of Natural Resource Sciences\"\n - name: \"Jonas J. Lembrechts\"\n orcid: \"0000-0002-1933-0750\"\n affiliation:\n - name: \"University of Antwerp\"\n department: \"Department of Biology\"\n - name: \"Kiara L'Herpiniere\"\n orcid: \"0000-0003-0322-1266\"\n affiliation:\n - name: \"Macquarie University\"\n department: \"Natural sciences\"\n - name: \"Eva J. P. 
Lievens\"\n orcid: \"0000-0003-3280-0072\"\n affiliation: \n - name: \"University of Konstanz\"\n department: \"Aquatic Ecology and Evolution Group, Limnological Institute\"\n - name: \"Daniela Oliveira de Lima\" \n orcid: \"0000-0001-6650-2570\" \n affiliation: \n - name: \"Universidade Federal da Fronteira Sul\" \n department: \"Campus Cerro Largo\"\n - name: \"Shane Lindsay\" \n affiliation: \n - name: \"University of Hull\"\n department: \"School of Psychology and Social Work\"\n - name: \"Martin Luquet\"\n orcid: \"0000-0002-4656-4923\"\n affiliation:\n - name: \"Université de Pau et des Pays de l′Adour\"\n department: \"UMR 1224 ECOBIOP\"\n - name: \"Ross MacLeod\"\n orcid: \"0000-0001-5508-0202\"\n affiliation:\n - name: \"Liverpool John Moores University\"\n department: \"School of Biological & Environmental Sciences\"\n - name: \"Kirsty H. Macphie\"\n orcid: \"0000-0002-9824-4833\" \n affiliation: \n - name: \"University of Edinburgh\"\n department: \"Institute of Ecology and Evolution\"\n - name: \"Kit Magellan\"\n orcid: \"\"\n affiliation:\n - name: \"\"\n department: \"\"\n - name: \"Magdalena M. Mair\" \n orcid: \"0000-0003-0074-6067\" \n affiliation: \n - name: \"Bayreuth Center of Ecology and Environmental Research (BayCEER), University of Bayreuth\" \n department: \"Statistical Ecotoxicology\"\n - name: \"Lisa E. Malm\"\n orcid: \"0000-0002-7412-9515\"\n affiliation: \n - name: \"Umeå University\"\n department: \"Ecology and Environmental Science\"\n - name: \"Stefano Mammola\"\n orcid: \"0000-0002-4471-9055\"\n affiliation: \n - name: \"National Research Council of Italy (CNR)\"\n department: \"Molecular Ecology Group (MEG), Water Research Institute (IRSA)\"\n - name: \"Caitlin P. 
Mandeville\"\n orcid: \"0000-0002-1361-607X\"\n affiliation:\n - name: \"Norwegian University of Science and Technology\"\n department: \"Department of Natural History\"\n - name: \"Michael Manhart\"\n orcid: \"0000-0003-3791-9056\"\n affiliation: \n - name: \"Rutgers University Robert Wood Johnson Medical School\"\n department: \"Center for Advanced Biotechnology and Medicine\"\n - name: \"Laura Milena Manrique-Garzon\"\n orcid: \"0009-0004-4671-6968\"\n affiliation:\n - name: \"Universidad de los Andes\"\n department: \"Departamento de Ciencias Biológicas\"\n - name: \"Elina Mäntylä\" \n orcid: \"0000-0002-2267-7114\"\n affiliation: \n - name: \"University of Turku\"\n department: \"Department of Biology\"\n - name: \"Philippe Marchand\" \n orcid: \"0000-0001-6717-0475\" \n affiliation: \n - name: \"Université du Québec en Abitibi-Témiscamingue\"\n department: \"Institut de recherche sur les forêts\"\n - name: \"Benjamin Michael Marshall\" \n orcid: \"0000-0001-9554-0605\" \n affiliation: \n - name: \"University of Stirling\"\n department: \"Biological and Environmental Sciences\"\n - name: \"Dominic Andreas Martin\"\n orcid: \"0000-0001-7197-2278\"\n affiliation: \n - name: \"University of Bern\"\n department: \"Institute of Plant Sciences\"\n - name: \"Jake Mitchell Martin\"\n orcid: \"0000-0001-9544-9094\"\n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Wildlife, Fish, and Environmental Studies\"\n - name: \"Charles A. Martin\"\n orcid: \"0000-0003-3185-4634\"\n affiliation: \n - name: \"Université du Québec à Trois-Rivières\"\n department: \"\"\n - name: \"April Robin Martinig\" \n orcid: \"0000-0002-0972-6903\"\n affiliation: \n - name: \"University of New South Wales\"\n department: \"School of Biological, Earth and Environmental Sciences\"\n - name: \"Erin S. 
McCallum\"\n orcid: \"0000-0001-5426-9652\"\n affiliation: \n - name: \"Swedish University of Agricultural Sciences\"\n department: \"Department of Wildlife, Fish and Environmental Studies\"\n - name: \"Mark McCauley\"\n orcid: \"0000-0001-5347-6860\"\n affiliation:\n - name: \"University of Florida\"\n department: \"Whitney Laboratory for Marine Bioscience\"\n - name: \"Sabrina M. McNew\" \n orcid: \"0000-0002-1345-1674\" \n affiliation: \n - name: \"University of Arizona\"\n department: \"Ecology and Evolutionary Biology\"\n - name: \"Scott J. Meiners\" \n orcid: \"0000-0003-1805-398X\" \n affiliation: \n - name: \"Eastern Illinois University\"\n department: \"Biological Sciences\"\n - name: \"Thomas Merkling\"\n orcid: \"0000-0002-5878-0359\"\n affiliation:\n - name: \"Université de Lorraine, Inserm1433 CIC-P CHRU de Nancy\"\n department: \"Centre d'Investigations Clinique Plurithématique - Institut Lorrain du Coeur et des Vaisseaux\"\n - name: \"Marcus Michelangeli\" \n orcid: \"0000-0002-0053-6759\"\n affiliation: \n - name: \"Swedish University of Agricultural Sciences\" \n department: \"Department of Wildlife, Fish and Environmental Studies\"\n - name: \"Maria Moiron\" \n orcid: \"0000-0003-0991-1460\" \n affiliation: \n - name: \"Bielefeld University\" \n department: \"Evolutionary biology department\"\n - name: \"Bruno Moreira\" \n orcid: \"0000-0002-7319-2555\" \n affiliation: \n - name: \"Centro de Investigaciones sobre Desertificación, Consejo Superior de Investigaciones Cientificas (CIDE-CSIC/UV/GV)\" \n department: \"Department of Ecology and global change\"\n - name: \"Jennifer Mortensen\" \n affiliation: \n - name: \"University of Arkansas\" \n department: \"Department of Biological Sciences\"\n - name: \"Benjamin Mos\" \n orcid: \"0000-0003-3687-516X\" \n affiliation: \n - name: \"The University of Queensland\" \n department: \"School of the Environment, Faculty of Science\"\n - name: \"Taofeek Olatunbosun Muraina\" \n orcid: 
\"0000-0003-2646-2732\" \n affiliation: \n - name: \"Oyo State College of Agriculture and Technology\" \n department: \"Department of Animal Health and Production\"\n - name: \"Penelope Wrenn Murphy\"\n orcid: \"0000-0002-9989-1696\"\n affiliation:\n - name: \"University of Wisconsin-Madison\"\n department: \"Department of Forest & Wildlife Ecology\"\n - name: \"Luca Nelli\"\n orcid: \"0000-0001-6091-4072\" \n affiliation: \n - name: \"University of Glasgow\" \n department: \"School of Biodiversity, One Health and Veterinary Medicine\"\n - name: \"Petri Niemelä\"\n orcid: \"\"\n affiliation:\n - name: \"University of Helsinki\"\n department: \"Organismal and Evolutionary Biology Research Programme, Faculty of Biological and Environmental Sciences\"\n - name: \"Josh Nightingale\"\n orcid: \"0000-0002-1188-7773\"\n affiliation:\n - name: \"University of Iceland\"\n department: \"South Iceland Research Centre\"\n - name: \"Gustav Nilsonne\"\n orcid: \"0000-0001-5273-0150\" \n affiliation: \n - name: \"Karolinska Institutet\"\n department: \"Department of Clinical Neuroscience\"\n - name: \"Sergio Nolazco\"\n orcid: \"0000-0003-2625-9283\"\n affiliation: \n - name: \"Monash University\"\n department: \"School of Biological Sciences\"\n - name: \"Sabine S. Nooten\"\n orcid: \"0000-0002-1798-315X\"\n affiliation: \n - name: \"University of Würzburg\"\n department: \"Animal Ecology and Tropical Biology\"\n - name: \"Jessie Lanterman Novotny\" \n orcid: \"0000-0001-5079-4070\"\n affiliation: \n - name: \"Hiram College\"\n department: \"Biology\"\n - name: \"Agnes Birgitta Olin\"\n orcid: \"0000-0002-8508-3911\"\n affiliation: \n - name: \"Swedish University of Agricultural Sciences\"\n department: \"Department of Aquatic Resources\"\n - name: \"Chris L. 
Organ\" \n affiliation: \n - name: \"Montana State University\" \n department: \"Department of Earth Sciences\"\n - name: \"Kate L Ostevik\" \n orcid: \"0000-0002-2197-9284\" \n affiliation: \n - name: \"University of California Riverside\"\n department: \"Department of Evolution, Ecology, and Organismal Biology\"\n - name: \"Facundo Xavier Palacio\"\n orcid: \"0000-0002-6536-1400\"\n affiliation: \n - name: \"Universidad Nacional de La Plata\" \n department: \"Sección Ornitología\"\n - name: \"Matthieu Paquet\" \n orcid: \"0000-0003-1182-2299\" \n affiliation: \n - name: \"Swedish University of Agricultural Sciences\"\n department: \"Department of Ecology\"\n - name: \"Darren James Parker\"\n orcid: \"0000-0003-4027-7779\"\n affiliation:\n - name: \"Bangor University\"\n department: \"\"\n - name: \"David J Pascall\" \n orcid: \"0000-0002-7543-0860\" \n affiliation: \n - name: \"University of Cambridge\" \n department: \"MRC Biostatistics Unit\"\n - name: \"Valerie J. Pasquarella\" \n orcid: \"0000-0003-1258-6195\" \n affiliation: \n - name: \"Harvard University\"\n department: \"Harvard Forest\"\n - name: \"John Harold Paterson\"\n orcid: \"\"\n affiliation:\n - name: \"University of Stirling\"\n department: \"Biological and Environmental Sciences\"\n - name: \"Ana Payo-Payo \" \n orcid: \"0000-0001-5482-242X\"\n affiliation: \n - name: \"Universidad Complutense de Madrid\"\n department: \"Departamento de Biodiversidad, Ecología y Evolución.\"\n - name: \"Karen Marie Pedersen\"\n orcid: \"0000-0003-0460-1420\"\n affiliation: \n - name: \"Technische Universität Darmstadt\"\n department: \"Biology department\"\n - name: \"Grégoire Perez\"\n orcid: \"0000-0001-8861-4856\"\n affiliation: \n - name: \"CIRAD\"\n department: \"UMR 1309 ASTRE, CIRAD – INRAE\"\n - name: \"Kayla I. 
Perry\"\n orcid: \"0000-0001-9903-8057\"\n affiliation: \n - name: \"The Ohio State University\"\n department: \"Department of Entomology\"\n - name: \"Patrice Pottier\" \n orcid: \"0000-0003-2106-6597\"\n affiliation: \n - name: \"The University of New South Wales\" \n department: \"Evolution & Ecology Research Centre, School of Biological, Earth and Environmental Sciences\"\n - name: \"Michael J. Proulx\"\n orcid: \"0000-0003-4066-3645\" \n affiliation: \n - name: \"University of Bath\"\n department: \"Department of Psychology\"\n - name: \"Raphaël Proulx\"\n orcid: \"0000-0002-9188-9225\"\n affiliation: \n - name: \"Université du Québec à Trois-Rivières\"\n department: \"Chaire de recherche en intégrité écologique\"\n - name: \"Jessica L Pruett\" \n affiliation: \n - name: \"University of Southern Mississippi\" \n department: \"Mississippi Based RESTORE Act Center of Excellence\"\n - name: \"Veronarindra Ramananjato\" \n orcid: \"0000-0003-2398-3671\" \n affiliation: \n - name: \"University of California Berkeley\" \n department: \"Department of Integrative Biology\"\n - name: \"Finaritra Tolotra Randimbiarison\" \n affiliation: \n - name: \"Université d'Antananarivo\" \n department: \"Mention Zoologie et Biodiversité Animale\"\n - name: \"Onja H. Razafindratsima\" \n orcid: \"0000-0003-1655-6647\" \n affiliation: \n - name: \"University of California, Berkeley\"\n department: \"Department of Integrative Biology \"\n - name: \"Diana J. 
Rennison\" \n orcid: \"0000-0002-5944-0743\" \n affiliation: \n - name: \"University of California San Diego\" \n department: \"Department of Ecology, Behavior and Evolution\"\n - name: \"Federico Riva\" \n orcid: \"0000-0002-1724-4293\" \n affiliation: \n - name: \"VU Amsterdam\" \n department: \"Institute for Environmental Sciences\"\n - name: \"Sepand Riyahi Riyahi\" \n orcid: \"0000-0003-3317-3576\" \n affiliation: \n - name: \"University of Vienna\" \n department: \"Department of Evolutionary Anthropology\"\n - name: \"Michael James Roast\"\n orcid: \"0000-0002-9454-7562\" \n affiliation: \n - name: \"University of Veterinary Medicine\"\n department: \"Konrad Lorenz Institute for Ethology\"\n - name: \"Felipe Pereira Rocha\" \n orcid: \"0000-0003-3968-1280\" \n affiliation: \n - name: \"The University of Hong Kong\"\n department: \"School of Biological Sciences\"\n - name: \"Dominique G. Roche\"\n orcid: \"0000-0002-3326-864X\"\n affiliation: \n - name: \"Université de Neuchâtel\"\n department: \"Institut de biologie\"\n - name: \"Cristian Román-Palacios\" \n orcid: \"0000-0003-1696-4886\" \n affiliation: \n - name: \"University of Arizona\" \n department: \"School of Information\"\n - name: \"Michael S. Rosenberg\" \n orcid: \"0000-0001-7882-2467\"\n affiliation: \n - name: \"Virginia Commonwealth University\"\n department: \"Center for Biological Data Science\"\n - name: \"Jessica Ross\" \n orcid: \"0000-0003-4124-3116\" \n affiliation: \n - name: \"University of Wisconsin\" \n department: \"\"\n - name: \"Freya E Rowland\" \n orcid: \"0000-0002-1041-5301\" \n affiliation: \n - name: \"Yale University\" \n department: \"School of the Environment\"\n - name: \"Deusdedith Rugemalila\" \n orcid: \"0000-0002-7473-4301\" \n affiliation: \n - name: \"Florida International University\" \n department: \"Institute of the Environment\"\n - name: \"Avery L. 
Russell\" \n orcid: \"0000-0001-8036-2711\" \n affiliation: \n - name: \"Missouri State University\"\n department: \"Department of Biology\"\n - name: \"Suvi Ruuskanen\" \n orcid: \"0000-0001-5582-9455\"\n affiliation: \n - name: \"University of Jyväskylä \" \n department: \"Department of Biological and Environmental Science\"\n - name: \"Patrick Saccone\" \n orcid: \"0000-0001-8820-593X\" \n affiliation: \n - name: \"OeAW (Austrian Academy of Sciences)\" \n department: \"Institute for Interdisciplinary Mountain Research\"\n - name: \"Asaf Sadeh\" \n orcid: \"0000-0002-2704-4033\" \n affiliation: \n - name: \"Newe Ya'ar Research Center, Agricultural Research Organization (Volcani Institute)\" \n department: \"Department of Natural Resources\"\n - name: \"Stephen M Salazar\" \n orcid: \"0000-0002-5437-0280\" \n affiliation: \n - name: \"Bielefeld University\"\n department: \"Department of Animal Behaviour\"\n - name: \"kris sales\" \n orcid: \"0000-0002-7568-2507\" \n affiliation: \n - name: \"Office for National Statistics\" \n department: \"\"\n - name: \"Pablo Salmón\" \n orcid: \"0000-0001-9718-6611\" \n affiliation: \n - name: \"Institute of Avian Research 'Vogelwarte Helgoland'\" \n department: \"\"\n - name: \"Alfredo Sánchez-Tójar\" \n orcid: \"0000-0002-2886-0649\" \n affiliation: \n - name: \"Bielefeld University\" \n department: \"Department of Evolutionary Biology\"\n - name: \"Leticia Pereira Santos\" \n affiliation: \n - name: \"Universidade Federal de Goiás\"\n department: \"Ecology Department\"\n - name: \"Francesca Santostefano\"\n orcid: \"0000-0003-3308-6552\"\n affiliation:\n - name: \"University of Exeter\"\n department: \"University of Exeter\"\n - name: \"Hayden T. 
Schilling\" \n orcid: \"0000-0002-7291-347X\" \n affiliation: \n - name: \"New South Wales Department of Primary Industries Fisheries\" \n department: \"\"\n - name: \"Marcus Schmidt\"\n orcid: \"0000-0002-5546-5521\"\n affiliation: \n - name: \"Leibniz Centre for Agricultural Landscape Research (ZALF)\"\n department: \"Research Data Management\"\n - name: \"Tim Schmoll\" \n orcid: \"0000-0003-3234-7335\"\n affiliation: \n - name: \"Bielefeld University\"\n department: \"Evolutionary Biology\"\n - name: \"Adam C. Schneider\" \n orcid: \"0000-0002-4249-864X\"\n affiliation: \n - name: \"University of Wisconsin-La Crosse\"\n department: \"Biology Department\"\n - name: \"Allie E Schrock\" \n orcid: \"0000-0001-5825-6306\" \n affiliation: \n - name: \"Duke University\"\n department: \"Department of Evolutionary Anthropology\"\n - name: \"Julia Schroeder\" \n orcid: \"0000-0002-4136-843X\"\n affiliation: \n - name: \"Imperial College London\"\n department: \"Department of Life Sciences\"\n - name: \"Nicolas Schtickzelle\"\n orcid: \"0000-0001-7829-5361\" \n affiliation: \n - name: \"UCLouvain\" \n department: \"Earth and Life Institute, Ecology and Biodiversity\"\n - name: \"Nick L. Schultz\" \n orcid: \"0000-0002-6760-9481\" \n affiliation: \n - name: \"Federation University Australia\"\n department: \"Future Regions Research Centre\"\n - name: \"Drew A. 
Scott\"\n orcid: \"0000-0003-0361-9522\"\n affiliation: \n - name: \"USDA - Agricultural Research Service\" \n department: \"Northern Great Plains Research Laboratory\"\n - name: \"Michael Peter Scroggie\"\n orcid: \"0000-0001-9441-6565\"\n affiliation:\n - name: \"Arthur Rylah Institute for Environmental Research\"\n - name: \"Julie Teresa Shapiro\"\n orcid: \"0000-0002-4539-650X\"\n affiliation:\n - name: \"University of Lyon - French Agency for Food, Environmental and Occupational Health and Safety (ANSES)\"\n department: \"Epidemiology and Surveillance Support Unit\"\n - name: \"Nitika Sharma Sharma\" \n orcid: \"0000-0002-7411-5594\" \n affiliation: \n - name: \"University of California Los Angeles\" \n department: \"UCLA Anderson Center for Impact\"\n - name: \"Caroline L Shearer\"\n orcid: \"0000-0001-7886-9302\"\n affiliation: \n - name: \"Duke University\"\n department: \"Department of Evolutionary Anthropology\"\n - name: \"Diego Simón\"\n orcid: \"0000-0002-6317-3991\"\n affiliation:\n - name: \"Universidad de la República\"\n department: \"Facultad de Ciencias\"\n - name: \"Michael I. 
Sitvarin\" \n orcid: \"0000-0002-3080-3619\" \n affiliation: \n - name: \"Independent researcher\" \n department: \"\"\n - name: \"Fabrício Luiz Skupien\"\n orcid: \"0000-0003-1991-7102\"\n affiliation: \n - name: \"Universidade Federal do Rio de Janeiro\"\n department: \"Programa de Pós-Graduação em Ecologia, Instituto de Biologia, Centro de Ciências da Saúde\"\n - name: \"Heather Lea Slinn\" \n affiliation: \n - name: \"Vive Crop Protection\"\n department: \"\"\n - name: \"Jeremy A Smith\" \n orcid: \"0000-0002-4942-8310\" \n affiliation: \n - name: \"British Trust for Ornithology\"\n department: \"\"\n - name: \"Grania Polly Smith\"\n affiliation: \n - name: \"University of Cambridge\"\n department: \"\"\n - name: \"Rahel Sollmann\"\n orcid: \"0000-0002-1607-2039\"\n affiliation: \n - name: \"University of California Davis\"\n department: \"Department of Wildlife, Fish, and Conservation Biology\"\n - name: \"Kaitlin Stack Whitney\" \n orcid: \"0000-0002-0815-5037\" \n affiliation: \n - name: \"Rochester Institute of Technology\"\n department: \"Science, Technology & Society Department\"\n - name: \"Shannon Michael Still\" \n orcid: \"0000-0002-7370-1217\"\n affiliation: \n - name: \"Nomad Ecology\"\n department: \"\"\n - name: \"Erica F. Stuber\"\n orcid: \"0000-0002-2687-6874\"\n affiliation: \n - name: \"Utah State University\"\n department: \"Wildland Resources Department\"\n - name: \"Guy F. 
Sutton\"\n orcid: \"0000-0003-2405-0945\" \n affiliation: \n - name: \"Rhodes University\"\n department: \"Center for Biological Control, Department of Zoology and Entomology\"\n - name: \"Ben Swallow\" \n orcid: \"0000-0002-0227-2160\" \n affiliation: \n - name: \"University of St Andrews\" \n department: \"School of Mathematics and Statistics and Centre for Research in Ecological and Environmental Modelling\"\n - name: \"Conor Claverie Taff\" \n orcid: \"0000-0003-1497-7990\" \n affiliation: \n - name: \"Cornell University\" \n department: \"Department of Ecology and Evolutionary Biology\"\n - name: \"Elina Takola\" \n orcid: \"0000-0003-1268-5513\" \n affiliation: \n - name: \"Helmholtz Centre for Environmental Research – UFZ\" \n department: \"Department of Computational Landscape Ecology\"\n - name: \"Andrew J Tanentzap\" \n orcid: \"0000-0002-2883-1901\" \n affiliation: \n - name: \"Trent University\"\n department: \"Ecosystems and Global Change Group, School of the Environment\"\n - name: \"Rocío Tarjuelo\"\n orcid: \"0000-0002-0638-1911\"\n affiliation:\n - name: \"Universidad de Valladolid\"\n department: \"Instituto Universitario de Investigación en Gestión Forestal Sostenible (iuFOR)\"\n - name: \"Richard J. Telford\"\n orcid: \"0000-0001-9826-3076\"\n affiliation:\n - name: \"University of Bergen\"\n department: \"Department of Biological Sciences\"\n - name: \"Christopher J. 
Thawley\" \n orcid: \"0000-0002-6040-2613\" \n affiliation: \n - name: \"University of Rhode Island\" \n department: \"Department of Biological Science\"\n - name: \"Hugo Thierry\"\n orcid: \"\"\n affiliation:\n - name: \"McGill University\"\n department: \"Department of Geography\"\n - name: \"Jacqueline Thomson\"\n orcid: \"\"\n affiliation:\n - name: \"University of Guelph\"\n department: \"Integrative Biology\"\n - name: \"Svenja Tidau\"\n orcid: \"0000-0003-0336-0450\"\n affiliation:\n - name: \"University of Plymouth\"\n department: \"School of Biological and Marine Sciences\"\n - name: \"Emily M. Tompkins\"\n orcid: \"0000-0002-1383-2039\"\n affiliation:\n - name: \"Wake Forest University\"\n department: \"Biology Department\"\n - name: \"Claire Marie Tortorelli\" \n orcid: \"0000-0001-9493-9817\" \n affiliation: \n - name: \"University of California, Davis\"\n department: \"Plant Sciences\"\n - name: \"Andrew Trlica\"\n orcid: \"0000-0001-7692-323X\"\n affiliation: \n - name: \"North Carolina State University\"\n department: \"College of Natural Resources\"\n - name: \"Biz R Turnell\" \n orcid: \"0000-0002-1068-304X\" \n affiliation: \n - name: \"Technische Universität Dresden\" \n department: \"Institute of Zoology\"\n - name: \"Lara Urban\"\n orcid: \"0000-0002-5445-9314\"\n affiliation: \n - name: \"Helmholtz Zentrum Muenchen\"\n department: \"Helmholtz AI\"\n - name: \"Jessica Eva Megan van der Wal\"\n orcid: \"0000-0002-6441-3598\"\n affiliation:\n - name: \"University of Cape Town\"\n department: \"FitzPatrick Institute of African Ornithology\"\n - name: \"Jens Van Eeckhoven\"\n orcid: \"0000-0001-8407-4290\"\n affiliation:\n - name: \"University College London\"\n department: \"Department of Cell & Developmental Biology, Division of Biosciences\"\n - name: \"Stijn Van de Vondel\" \n orcid: \"0000-0002-0223-7330\" \n affiliation: \n - name: \"University of Antwerp\"\n department: \"Department of Biology\"\n - name: \"Francis van Oordt\" \n orcid: 
\"0000-0002-8471-235X\" \n affiliation: \n - name: \"McGill University\" \n department: \"Natural Resource Sciences\"\n - name: \"Mark C. Vanderwel\"\n affiliation: \n - name: \"University of Regina\" \n department: \"Department of Biology\"\n - name: \"K. Michelle Vanderwel\" \n affiliation: \n - name: \"University of Saskatchewan\" \n department: \"Biology\"\n - name: \"Karen J Vanderwolf\" \n orcid: \"0000-0003-0963-3093\" \n affiliation: \n - name: \"University of Waterloo\" \n department: \"Biology\"\n - name: \"Juliana Vélez\"\n orcid: \"0000-0003-0412-2761\"\n affiliation:\n - name: \"University of Minnesota\"\n department: \"Department of Fisheries, Wildlife and Conservation Biology\"\n - name: \"Diana Carolina Vergara-Florez\"\n orcid: \"0000-0001-9349-4027\"\n affiliation:\n - name: \"University of Michigan\"\n department: \"Department of Ecology & Evolutionary Biology\"\n - name: \"Brian C. Verrelli\" \n orcid: \"0000-0002-9670-4920\" \n affiliation: \n - name: \"Virginia Commonwealth University\" \n department: \"Center for Biological Data Science\"\n - name: \"Nora Villamil\" \n orcid: \"0000-0002-6957-2248\" \n affiliation: \n - name: \"Public Health Scotland\" \n department: \"Lothian Analytical Services\"\n - name: \"Marcus Vinícius Vieira\"\n orcid: \"0000-0002-4472-5447\" \n affiliation: \n - name: \"Universidade Federal do Rio de Janeiro\" \n department: \"Dept. 
Ecologia, Instituto de Biologia\"\n - name: \"Nora Villamil\"\n orcid: \"0000-0002-6957-2248\"\n affiliation:\n - name: \"Public Health Scotland\"\n department: \"Lothian Analytical Services\"\n - name: \"Valerio Vitali\"\t \n orcid: \"0000-0003-3593-1510\"\n affiliation:\n - name: \"University of Muenster\"\n department: \"Institute for Evolution and Biodiversity\"\n - name: \"Julien Vollering\" \n orcid: \"0000-0002-7409-2898\" \n affiliation: \n - name: \"Western Norway University of Applied Sciences \" \n department: \"Department of Environmental Sciences\"\n - name: \"Jeffrey Walker\"\n orcid: \"0000-0003-2864-7036\"\n affiliation:\n - name: \"University of Southern Maine\"\n department: \"Department of Biological Sciences\"\n - name: \"Xanthe J Walker\" \n orcid: \"0000-0002-2448-691X\" \n affiliation: \n - name: \"Northern Arizona University\" \n department: \"Center for Ecosystem Science and Society\"\n - name: \"Jonathan A. Walter\" \n orcid: \"0000-0003-2983-751X\" \n affiliation: \n - name: \"University of California Davis\"\n department: \"Center for Watershed Sciences\"\n - name: \"Pawel Waryszak\" \n orcid: \"0000-0002-4245-3150\" \n affiliation: \n - name: \"University of Southern Queensland\"\n department: \"School of Agriculture and Environmental Science\"\n - name: \"Ryan J. Weaver\" \n orcid: \"0000-0002-6160-4735\" \n affiliation: \n - name: \"Iowa State University\" \n department: \"Department of Ecology, Evolution, and Organismal Biology\"\n - name: \"Ronja E. M. Wedegärtner\"\n orcid: \"0000-0003-4633-755X\"\n affiliation:\n - name: \"Fram Project AS\"\n department: \"\"\n - name: \"Daniel L. 
Weller\" \n orcid: \"0000-0001-7259-6331\" \n affiliation: \n - name: \"Virginia Polytechnic Institute and State University\" \n department: \"Department of Food Science & Technology\"\n - name: \"Shannon Whelan\" \n orcid: \"0000-0003-2862-327X\" \n affiliation: \n - name: \"McGill University\" \n department: \"Department of Natural Resource Sciences\"\n - name: \"Rachel Louise White\" \n orcid: \"0000-0003-0694-7847\" \n affiliation: \n - name: \"University of Brighton\" \n department: \"School of Applied Sciences\"\n - name: \"David William Wolfson\" \n orcid: \"0000-0003-1098-9206\" \n affiliation: \n - name: \"University of Minnesota\" \n department: \"Department of Fisheries, Wildlife and Conservation Biology\"\n - name: \"Andrew Wood\" \n orcid: \"0000-0001-6863-0824\" \n affiliation: \n - name: \"University of Oxford\" \n department: \"Department of Biology\"\n - name: \"Scott W. Yanco\"\n orcid: \"0000-0003-4717-9370\" \n affiliation: \n - name: \"University of Colorado Denver\" \n department: \"Department of Integrative Biology\"\n - name: \"Jian D. L. Yen\" \n orcid: \"0000-0001-7964-923X\" \n affiliation: \n - name: \"Arthur Rylah Institute for Environmental Research\" \n department: \"\"\n - name: \"Casey Youngflesh\" \n orcid: \"0000-0001-6343-3311\" \n affiliation: \n - name: \"Michigan State University\" \n department: \"Ecology, Evolution, and Behavior Program\"\n - name: \"Giacomo Zilio\" \n orcid: \"0000-0002-4448-3118\" \n affiliation: \n - name: \"University of Montpellier, CNRS\" \n department: \"ISEM\"\n - name: \"Cédric Zimmer\" \n orcid: \"0000-0001-8160-2836\" \n affiliation: \n - name: \"Université Sorbonne Paris Nord\"\n department: \"Laboratoire d’Ethologie Expérimentale et Comparée, LEEC, UR4443\"\n - name: \"Gregory Mark Zimmerman\"\n orcid: \"\"\n affiliation:\n - name: \"Lake Superior State University\"\n department: \"Department of Science and Environment\"\n - name: \"Rachel A. 
Zitomer\" \n orcid: \"0000-0002-1888-1817\"\n affiliation: \n - name: \"Oregon State University\" \n department: \"Department of Forest Ecosystems and Society\"\ncitation:\n type: article-journal\n container-title: \"BMC Biology\"\n issued: \"Date\"\n volume: \"\"\n doi: \"\"\n url: \"\"\n author:\n - name: \"Elliot Gould\"\n - name: \"Hannah S. Fraser\"\n - name: \"Timothy H. Parker\"\n - name: \"Shinichi Nakagawa\"\n - name: \"Simon C. Griffith\"\n - name: \"Peter A. Vesk\"\n - name: \"Fiona Fidler\"\n - name: \"Daniel G. Hamilton\"\n - name: \"Robin N. Abbey-Lee\"\n - name: \"Jessica K. Abbott\"\n - name: \"Luis A. Aguirre\"\n - name: \"Carles Alcaraz\"\n - name: \"Irith Aloni\"\n - name: \"Drew Altschul\"\n - name: \"Kunal Arekar\"\n - name: \"Jeff W. Atkins\"\n - name: \"Joe Atkinson\"\n - name: \"Christopher M. Baker\"\n - name: \"Meghan Barrett\"\n - name: \"Kristian Bell\"\n - name: \"Suleiman Kehinde Bello\"\n - name: \"Iván Beltrán\"\n - name: \"Bernd J. Berauer\"\n - name: \"Michael Grant Bertram\"\n - name: \"Peter D. Billman\"\n - name: \"Charlie K. Blake\"\n - name: \"Shannon Blake\"\n - name: \"Louis Bliard\"\n - name: \"Andrea Bonisoli-Alquati\"\n - name: \"Timothée Bonnet\"\n - name: \"Camille Nina Marion Bordes\"\n - name: \"Aneesh P. H. Bose\"\n - name: \"Thomas Botterill-James\"\n - name: \"Melissa Anna Boyd\"\n - name: \"Sarah A. Boyle\"\n - name: \"Tom Bradfer-Lawrence\"\n - name: \"Jennifer Bradham\"\n - name: \"Jack A. Brand\"\n - name: \"Martin I. Brengdahl\"\n - name: \"Martin Bulla\"\n - name: \"Luc Bussière\"\n - name: \"Ettore Camerlenghi\"\n - name: \"Sara E. Campbell\"\n - name: \"Leonardo L. F. Campos\"\n - name: \"Anthony Caravaggi\"\n - name: \"Pedro Cardoso\"\n - name: \"Charles J. W. Carroll\"\n - name: \"Therese A. Catanach\"\n - name: \"Xuan Chen\"\n - name: \"Heung Ying Janet Chik\"\n - name: \"Emily Sarah Choy\"\n - name: \"Alec Philip Christie\"\n - name: \"Angela Chuang\"\n - name: \"Amanda J. 
Chunco\"\n - name: \"Bethany L. Clark\"\n - name: \"Andrea Contina\"\n - name: \"Garth A Covernton\"\n - name: \"Murray P. Cox\"\n - name: \"Kimberly A. Cressman\"\n - name: \"Marco Crotti\"\n - name: \"Connor Davidson Crouch\"\n - name: \"Pietro B. D'Amelio\"\n - name: \"Alexandra Allison de Sousa\"\n - name: \"Timm Fabian Döbert\"\n - name: \"Ralph Dobler\"\n - name: \"Adam J. Dobson\"\n - name: \"Tim S. Doherty\"\n - name: \"Szymon Marian Drobniak\"\n - name: \"Alexandra Grace Duffy\"\n - name: \"Alison B. Duncan\"\n - name: \"Robert P. Dunn\"\n - name: \"Jamie Dunning\"\n - name: \"Trishna Dutta\"\n - name: \"Luke Eberhart-Hertel\"\n - name: \"Jared Alan Elmore\"\n - name: \"Mahmoud Medhat Elsherif\"\n - name: \"Holly M. English\"\n - name: \"David C. Ensminger\"\n - name: \"Ulrich Rainer Ernst\"\n - name: \"Ulrich Rainer Ernst\"\n - name: \"Stephen M. Ferguson\"\n - name: \"Esteban Fernandez-Juricic\"\n - name: \"Thalita Ferreira-Arruda Ferreira-Arruda\"\n - name: \"John Fieberg\"\n - name: \"Elizabeth A. Finch\"\n - name: \"Evan A. Fiorenza\"\n - name: \"David N. Fisher\"\n - name: \"Amélie Fontaine\"\n - name: \"Wolfgang Forstmeier\"\n - name: \"Yoan Fourcade\"\n - name: \"Graham S. Frank\"\n - name: \"Cathryn A. Freund\"\n - name: \"Eduardo Fuentes-Lillo\"\n - name: \"Sara L. Gandy\"\n - name: \"Dustin G. Gannon\"\n - name: \"Ana I. García-Cervigón\"\n - name: \"Alexis C. Garretson\"\n - name: \"Xuezhen Ge\"\n - name: \"William L. Geary\"\n - name: \"Charly Géron\"\n - name: \"Charly Géron\"\n - name: \"Marc Gilles\"\n - name: \"Antje Girndt\"\n - name: \"Daniel Gliksman\"\n - name: \"Harrison B. Goldspiel\"\n - name: \"Dylan G. E. Gomes\"\n - name: \"Megan Kate Good\"\n - name: \"Sarah C. Goslee\"\n - name: \"J. Stephen Gosnell\"\n - name: \"Eliza M. Grames\"\n - name: \"Paolo Gratton\"\n - name: \"Nicholas M. Grebe\"\n - name: \"Skye M. Greenler\"\n - name: \"Maaike Griffioen\"\n - name: \"Daniel M. Griffith\"\n - name: \"Frances J. 
Griffith\"\n - name: \"Jake J. Grossman\"\n - name: \"Ali Güncan\"\n - name: \"Stef Haesen\"\n - name: \"James G. Hagan\"\n - name: \"Heather A. Hager\"\n - name: \"Jonathan Philo Harris\"\n - name: \"Natasha Dean Harrison\"\n - name: \"Sarah Syedia Hasnain\"\n - name: \"Justin Chase Havird\"\n - name: \"Andrew J. Heaton\"\n - name: \"María Laura Herrera-Chaustre\"\n - name: \"Tanner J. Howard\"\n - name: \"Bin-Yan Hsu\"\n - name: \"Fabiola Iannarilli\"\n - name: \"Esperanza C. Iranzo\"\n - name: \"Erik N. K. Iverson\"\n - name: \"Saheed Olaide Jimoh\"\n - name: \"Douglas H. Johnson\"\n - name: \"Martin Johnsson\"\n - name: \"Jesse Jorna\"\n - name: \"Tommaso Jucker\"\n - name: \"Martin Jung\"\n - name: \"Ineta Kačergytė\"\n - name: \"Oliver Kaltz\"\n - name: \"Alison Ke\"\n - name: \"Clint D. Kelly\"\n - name: \"Katharine Keogan\"\n - name: \"Friedrich Wolfgang Keppeler\"\n - name: \"Alexander K. Killion\"\n - name: \"Dongmin Kim\"\n - name: \"David P. Kochan\"\n - name: \"Peter Korsten\"\n - name: \"Shan Kothari\"\n - name: \"Jonas Kuppler\"\n - name: \"Jillian M. Kusch\"\n - name: \"Malgorzata Lagisz\"\n - name: \"Kristen Marianne Lalla\"\n - name: \"Daniel J. Larkin\"\n - name: \"Courtney L. Larson\"\n - name: \"Katherine S. Lauck\"\n - name: \"M. Elise Lauterbur\"\n - name: \"Alan Law\"\n - name: \"Don-Jean Léandri-Breton\"\n - name: \"Jonas J. Lembrechts\"\n - name: \"Kiara L'Herpiniere\"\n - name: \"Eva J. P. Lievens\"\n - name: \"Daniela Oliveira de Lima\"\n - name: \"Shane Lindsay\"\n - name: \"Martin Luquet\"\n - name: \"Ross MacLeod\"\n - name: \"Kirsty H. Macphie\"\n - name: \"Kit Magellan\"\n - name: \"Magdalena M. Mair\"\n - name: \"Lisa E. Malm\"\n - name: \"Stefano Mammola\"\n - name: \"Caitlin P. Mandeville\"\n - name: \"Michael Manhart\"\n - name: \"Laura Milena Manrique-Garzon\"\n - name: \"Elina Mäntylä\"\n - name: \"Philippe Marchand\"\n - name: \"Benjamin Michael Marshall\"\n - name: \"Charles A. 
Martin\"\n - name: \"Dominic Andreas Martin\"\n - name: \"Jake Mitchell Martin\"\n - name: \"April Robin Martinig\"\n - name: \"Erin S. McCallum\"\n - name: \"Mark McCauley\"\n - name: \"Sabrina M. McNew\"\n - name: \"Scott J. Meiners\"\n - name: \"Thomas Merkling\"\n - name: \"Marcus Michelangeli\"\n - name: \"Maria Moiron\"\n - name: \"Bruno Moreira\"\n - name: \"Jennifer Mortensen\"\n - name: \"Benjamin Mos\"\n - name: \"Taofeek Olatunbosun Muraina\"\n - name: \"Penelope Wrenn Murphy\"\n - name: \"Luca Nelli\"\n - name: \"Petri Niemelä\"\n - name: \"Josh Nightingale\"\n - name: \"Gustav Nilsonne\"\n - name: \"Sergio Nolazco\"\n - name: \"Sabine S. Nooten\"\n - name: \"Jessie Lanterman Novotny\"\n - name: \"Agnes Birgitta Olin\"\n - name: \"Chris L. Organ\"\n - name: \"Kate L. Ostevik\"\n - name: \"Facundo Xavier Palacio\"\n - name: \"Matthieu Paquet\"\n - name: \"Darren James Parker\"\n - name: \"David J. Pascall\"\n - name: \"Valerie J. Pasquarella\"\n - name: \"John Harold Paterson\"\n - name: \"Ana Payo-Payo\"\n - name: \"Karen Marie Pedersen\"\n - name: \"Grégoire Perez\"\n - name: \"Kayla I. Perry\"\n - name: \"Patrice Pottier\"\n - name: \"Michael J. Proulx\"\n - name: \"Raphaël Proulx\"\n - name: \"Jessica L. Pruett\"\n - name: \"Veronarindra Ramananjato\"\n - name: \"Finaritra Tolotra Randimbiarison\"\n - name: \"Onja H. Razafindratsima\"\n - name: \"Diana J. Rennison\"\n - name: \"Federico Riva\"\n - name: \"Sepand Riyahi\"\n - name: \"Michael James Roast\"\n - name: \"Felipe Pereira Rocha\"\n - name: \"Dominique G. Roche\"\n - name: \"Cristian Román-Palacios\"\n - name: \"Michael S. Rosenberg\"\n - name: \"Jessica Ross\"\n - name: \"Freya E. Rowland\"\n - name: \"Deusdedith Rugemalila\"\n - name: \"Avery L. Russell\"\n - name: \"Suvi Ruuskanen\"\n - name: \"Patrick Saccone\"\n - name: \"Asaf Sadeh\"\n - name: \"Stephen M. 
Salazar\"\n - name: \"kris sales\"\n - name: \"Pablo Salmón\"\n - name: \"Alfredo Sánchez-Tójar\"\n - name: \"Leticia Pereira Santos\"\n - name: \"Francesca Santostefano\"\n - name: \"Hayden T. Schilling\"\n - name: \"Marcus Schmidt\"\n - name: \"Tim Schmoll\"\n - name: \"Adam C. Schneider\"\n - name: \"Allie E. Schrock\"\n - name: \"Julia Schroeder\"\n - name: \"Nicolas Schtickzelle\"\n - name: \"Nick L. Schultz\"\n - name: \"Drew A. Scott\"\n - name: \"Michael Peter Scroggie\"\n - name: \"Julie Teresa Shapiro\"\n - name: \"Nitika Sharma\"\n - name: \"Caroline L. Shearer\"\n - name: \"Diego Simón\"\n - name: \"Michael I. Sitvarin\"\n - name: \"Fabrício Luiz Skupien\"\n - name: \"Heather Lea Slinn\"\n - name: \"Grania Polly Smith\"\n - name: \"Jeremy A. Smith\"\n - name: \"Rahel Sollmann\"\n - name: \"Kaitlin Stack Whitney\"\n - name: \"Shannon Michael Still\"\n - name: \"Erica F. Stuber\"\n - name: \"Guy F. Sutton\"\n - name: \"Ben Swallow\"\n - name: \"Conor Claverie Taff\"\n - name: \"Elina Takola\"\n - name: \"Andrew J Tanentzap\"\n - name: \"Rocío Tarjuelo\"\n - name: \"Richard J. Telford\"\n - name: \"Christopher J. Thawley\"\n - name: \"Hugo Thierry\"\n - name: \"Jacqueline Thomson\"\n - name: \"Svenja Tidau\"\n - name: \"Mark C. Vanderwel\"\n - name: \"Karen J. Vanderwolf\"\n - name: \"Juliana Vélez\"\n - name: \"Diana Carolina Vergara-Florez\"\n - name: \"Brian C. Verrelli\"\n - name: \"Marcus Vinícius Vieira\"\n - name: \"Nora Villamil\"\n - name: \"Valerio Vitali\"\n - name: \"Julien Vollering\"\n - name: \"Jeffrey Walker\"\n - name: \"Xanthe J. Walker\"\n - name: \"Jonathan A. Walter\"\n - name: \"Pawel Waryszak\"\n - name: \"Ryan J. Weaver\"\n - name: \"Ronja E. M. Wedegärtner\"\n - name: \"Daniel L. Weller\"\n - name: \"Shannon Whelan\"\n - name: \"Rachel Louise White\"\n - name: \"David William Wolfson\"\n - name: \"Andrew Wood\"\n - name: \"Scott W. Yanco\"\n - name: \"Jian D. L. 
Yen\"\n - name: \"Casey Youngflesh\"\n - name: \"Giacomo Zilio\"\n - name: \"Cédric Zimmer\"\n - name: \"Gregory Mark Zimmerman\"\n - name: \"Rachel A. Zitomer\"\nbibliography: \n - ms/references.bib\n - ms/grateful-refs.bib\nnumber-sections: true\nnumber-depth: 3\ntoc-depth: 3\ntbl-cap-location: top\ndate-modified: last-modified\ngoogle-scholar: true\neditor: \n markdown: \n wrap: sentence\npre-render: utils.R\nexecute:\n freeze: auto\n---\n\n\n\n\n\n\n# Introduction\n\nOne value of science derives from its production of replicable, and thus reliable, results.\nWhen we repeat a study using the original methods we should be able to expect a similar result.\nHowever, perfect replicability is not a reasonable goal.\nEffect sizes will vary, and even reverse in sign, by chance alone [@gelman2009].\nObserved patterns can differ for other reasons as well.\nIt could be that we do not sufficiently understand the conditions that led to the original result so when we seek to replicate it, the conditions differ due to some 'hidden moderator'.\nThis hidden moderator hypothesis is described by meta-analysts in ecology and evolutionary biology as 'true biological heterogeneity' [@senior2016].\nThis idea of true heterogeneity is popular in ecology and evolutionary biology, and there are good reasons to expect it in the complex systems in which we work [@shavit2017].\nHowever, despite similar expectations in psychology, recent evidence in that discipline contradicts the hypothesis that moderators are common obstacles to replicability, as variability in results in a large 'many labs' collaboration was mostly unrelated to commonly hypothesized moderators such as the conditions under which the studies were administered [@klein2018].\nAnother possible explanation for variation in effect sizes is that researchers often present biased samples of results, thus reducing the likelihood that later studies will produce similar effect sizes [@open2015; @parker2016; @forstmeier2017; 
@fraser2018; @parker2023].\nIt also may be that although researchers did successfully replicate the conditions, the experiment, and measured variables, analytical decisions differed sufficiently among studies to create divergent results [@simonsohn2015; @silberzahn2018].\n\nAnalytical decisions vary among studies because researchers have many options.\nResearchers need to decide how to exclude possibly anomalous or unreliable data, how to construct variables, which variables to include in their models, and which statistical methods to use.\nDepending on the dataset, this short list of choices could encompass thousands or millions of possible alternative specifications [@simonsohn2015].\nHowever, researchers making these decisions presumably do so with the goal of doing the best possible analysis, or at least the best analysis within their current skill set.\nThus it seems likely that some specification options are more probable than others, possibly because they have previously been shown (or claimed) to be better, or because they are more well known.\nOf course, some of these different analyses (maybe many of them) may be equally valid alternatives.\nRegardless, on probably any topic in ecology and evolutionary biology, we can encounter differences in choices of data analysis.\nThe extent of these differences in analyses and the degree to which these differences influence the outcomes of analyses and therefore studies' conclusions are important empirical questions.\nThese questions are especially important given that many papers draw conclusions after applying a single method, or even a single statistical model, to analyze a dataset.\n\nThe possibility that different analytical choices could lead to different outcomes has long been recognized [@gelman2013], and various efforts to address this possibility have been pursued in the literature.\nFor instance, one common method in ecology and evolutionary biology involves creating a set of candidate models, each 
consisting of a different (though often similar) set of predictor variables, and then, for the predictor variable of interest, averaging the slope across all models (i.e. model averaging) [@burnham2002; @grueber2011].\nThis method reduces the chance that a conclusion is contingent upon a single model specification, though use and interpretation of this method is not without challenges [@grueber2011].\nFurther, the models compared to each other typically differ only in the inclusion or exclusion of certain predictor variables and not in other important ways, such as methods of parameter estimation.\nMore explicit examination of outcomes of differences in model structure, model type, data exclusion, or other analytical choices can be implemented through sensitivity analyses [e.g., @noble2017].\nSensitivity analyses, however, are typically rather narrow in scope, and are designed to assess the sensitivity of analytical outcomes to a particular analytical choice rather than to a large universe of choices.\nRecently, however, analysts in the social sciences have proposed extremely thorough sensitivity analysis, including 'multiverse analysis' [@steegen2016] and the 'specification curve' [@simonsohn2015], as a means of increasing the reliability of results.\nWith these methods, researchers identify relevant decision points encountered during analysis and conduct the analysis many times to incorporate many plausible decisions made at each of these points.\nThe study's conclusions are then based on a broad set of the possible analyses and so allow the analyst to distinguish between robust conclusions and those that are highly contingent on particular model specifications.\nThese are useful outcomes, but specifying a universe of possible modelling decisions is not a trivial undertaking.\nFurther, the analyst's knowledge and biases will influence decisions about the boundaries of that universe, and so there will always be room for disagreement among analysts about what to 
include.\nIncluding more specifications is not necessarily better.\nSome analytical decisions are better justified than others, and including biologically implausible specifications may undermine this process.\nRegardless, these powerful methods have yet to be adopted, and even more limited forms of sensitivity analyses are not particularly widespread.\nMost studies publish a small set of analyses and so the existing literature does not provide much insight into the degree to which published results are contingent on analytical decisions.\n\nDespite the potential major impacts of analytical decisions on variance in results, the outcomes of different individuals' data analysis choices have only recently begun to receive much empirical attention.\nThe only formal exploration of this that we were aware of when we submitted our Stage 1 manuscript were (1) an analysis in social science that asked whether male professional football (soccer) players with darker skin tone were more likely to be issued red cards (ejection from the game for rule violation) than players with lighter skin tone [@silberzahn2018] and (2) an analysis in neuroimaging which evaluated nine separate hypotheses involving the neurological responses detected with fMRI in 108 participants divided between two treatments in a decision making task [@botvinik-nezer2020].\nSeveral others have been published since [e.g., @huntington-klein2021; @schweinsberg2021; @breznau2022; @coretta2023], and we recently learned of an earlier small study in ecology [@stanton-geddes2014].\nIn the red card study, 29 teams designed and implemented analyses of a dataset provided by the study coordinators [@silberzahn2018].\nAnalyses were peer reviewed (results blind) by at least two other participating analysts; a level of scrutiny consistent with standard pre-publication peer review.\nAmong the final 29 analyses, odds-ratios varied from 0.89 to 2.93, meaning point estimates varied from having players with lighter skin tones 
receive more red cards (odds ratio \\< 1) to a strong effect of players with darker skin tones receiving more red cards (odds ratio \\> 1).\nTwenty of the 29 teams found a statistically-significant effect in the predicted direction of players with darker skin tones being issued more red cards.\nThis degree of variation in peer-reviewed analyses from identical data is striking, but the generality of this finding has only just begun to be formally investigated [e.g., @huntington-klein2021; @schweinsberg2021; @breznau2022; @coretta2023].\n\nIn the neuroimaging study, 70 teams evaluated each of the nine different hypotheses with the available fMRI data [@botvinik-nezer2020].\nThese 70 teams followed a divergent set of workflows that produced a wide range of results.\nThe rate of reporting of statistically significant support for the nine hypotheses ranged from 21$\\%$ to 84$\\%$, and for each hypothesis on average, 20$\\%$ of research teams observed effects that differed substantially from the majority of other teams.\nSome of the variability in results among studies could be explained by analytical decisions such as choice of software package, smoothing function, and parametric versus non-parametric corrections for multiple comparisons.\nHowever, substantial variability among analyses remained unexplained, and presumably emerged from the many different decisions each analyst made in their long workflows.\nSuch variability in results among analyses from this dataset and from the very different red-card dataset suggests that sensitivity of analytical outcome to analytical choices may characterize many distinct fields, as several more recent many-analyst studies also suggest [@huntington-klein2021; @schweinsberg2021; @breznau2022].\n\nTo further develop the empirical understanding of the effects of analytical decisions on study outcomes, we chose to estimate the extent to which researchers' data analysis choices drive differences in 
effect sizes, model predictions, and qualitative conclusions in ecology and evolutionary biology.\nThis is an important extension of the meta-research agenda of evaluating factors influencing replicability in ecology, evolutionary biology, and beyond [@fidler2017].\nTo examine the effects of analytical decisions, we used two different datasets and recruited researchers to analyze one or the other of these datasets to answer a question we defined.\nThe first question was \"To what extent is the growth of nestling blue tits (*Cyanistes caeruleus*) influenced by competition with siblings?\" To answer this question, we provided a dataset that includes brood size manipulations from 332 broods conducted over three years at Wytham Wood, UK. The second question was \"How does grass cover influence *Eucalyptus* spp. seedling recruitment?\" For this question, analysts used a dataset that includes, among other variables, number of seedlings in different size classes, percentage cover of different life forms, tree canopy cover, and distance from canopy edge from 351 quadrats spread among 18 sites in Victoria, Australia.\n\nWe explored the impacts of data analysts' choices with descriptive statistics and with a series of tests to attempt to explain the variation among effect sizes and predicted values of the dependent variable produced by the different analysis teams for both datasets separately.\nTo describe the variability, we present forest plots of the standardized effect sizes and predicted values produced by each of the analysis teams, estimate heterogeneity (both absolute, $\\tau^2$, and proportional, $I^2$) in effect size and predicted values among the results produced by these different teams, and calculate a similarity index that quantifies variability among the predictor variables selected for the different statistical models constructed by the different analysis teams.\nThese descriptive statistics provide the first estimates of the extent to which explanatory 
statistical models and their outcomes in ecology and evolutionary biology vary based on the decisions of different data analysts.\nWe then quantified the degree to which the variability in effect size and predicted values could be explained by (1) variation in the quality of analyses as rated by peer reviewers and (2) the similarity of the choices of predictor variables between individual analyses.\n\n# Methods\n\nThis project involved a series of steps (1-6) that began with identifying datasets for analyses and continued through recruiting independent groups of scientists to analyze the data, allowing the scientists to analyze the data as they saw fit, generating peer review ratings of the analyses (based on methods, not results), evaluating the variation in effects among the different analyses, and producing the final manuscript.\n\n## **Step 1: Select Datasets**\n\nWe used two previously unpublished datasets, one from evolutionary ecology and the other from ecology and conservation.\n\n**Evolutionary ecology**\n\nOur evolutionary ecology dataset is relevant to a sub-discipline of life-history research which focuses on identifying costs and trade-offs associated with different phenotypic conditions.\nThese data were derived from a brood-size manipulation experiment imposed on wild birds nesting in boxes provided by researchers in an intensively studied population.\nUnderstanding how the growth of nestlings is influenced by the numbers of siblings in the nest can give researchers insights into factors such as the evolution of clutch size, determination of provisioning rates by parents, and optimal levels of sibling competition [@vanderwerf1992; @dekogel1997; @royle1999; @verhulst2006; @nicolaus2009].\nData analysts were provided this dataset and instructed to answer the following question: \"To what extent is the growth of nestling blue tits (*Cyanistes caeruleus*) influenced by competition with siblings?\"\n\nResearchers conducted brood size manipulations and 
population monitoring of blue tits at Wytham Wood, a 380 ha woodland in Oxfordshire, U.K (1º 20'W, 51º 47'N).\nResearchers regularly checked approximately 1100 artificial nest boxes at the site and monitored the 330 to 450 blue tit pairs occupying those boxes in 2001-2003 during the experiment.\nNearly all birds made only one breeding attempt during the April to June study period in a given year.\nAt each blue tit nest, researchers recorded the date the first egg appeared, clutch size, and hatching date.\nFor all chicks alive at age 14 days, researchers measured mass and tarsus length and fitted a uniquely numbered, British Trust for Ornithology (BTO) aluminium leg ring.\nResearchers attempted to capture all adults at their nests between day 6 and day 14 of the chick-rearing period.\nFor these captured adults, researchers measured mass, tarsus length, and wing length and fitted a uniquely numbered BTO leg ring.\nDuring the 2001-2003 breeding seasons, researchers manipulated brood sizes using cross fostering.\nThey matched broods for hatching date and brood size and moved chicks between these paired nests one or two days after hatching.\nThey sought to either enlarge or reduce all manipulated broods by approximately one fourth.\nTo control for effects of being moved, each reduced brood had a portion of its brood replaced by chicks from the paired increased brood, and vice versa.\nNet manipulations varied from plus or minus four chicks in broods of 12 to 16 to plus or minus one chick in broods of 4 or 5.\nResearchers left approximately one third of all broods unmanipulated.\nThese unmanipulated broods were not selected systematically to match manipulated broods in clutch size or laying date.\nWe have mass and tarsus length data from 3720 individual chicks divided among 167 experimentally enlarged broods, 165 experimentally reduced broods, and 120 unmanipulated broods.\nThe full list of variables included in the dataset is publicly available (), along with the data 
().\n\n::: {.callout-note appearance=\"simple\"}\n## Additional Explanation: \n\nShortly after beginning to recruit analysts, several analysts noted a small set of related errors in the blue tit dataset.\nWe corrected the errors, replaced the dataset on our OSF site, and emailed the analysts on 19 April 2020 to instruct them to use the revised data.\nThe email to analysts is available here ().\nThe errors are explained in that email.\n:::\n\n**Ecology and conservation**\n\nOur ecology and conservation dataset is relevant to a sub-discipline of conservation research which focuses on investigating how best to revegetate private land in agricultural landscapes.\nThese data were collected on private land under the Bush Returns program, an incentive system where participants entered into a contract with the Goulburn Broken Catchment Management Authority and received annual payments if they executed predetermined restoration activities.\nThis particular dataset is based on a passive regeneration initiative, where livestock grazing was removed from the property in the hopes that the *Eucalyptus* spp.\noverstorey would regenerate without active (and expensive) planting.\nAnalyses of some related data have been published [@miles2008; @vesk2016] but those analyses do not address the question analysts answered in our study.\nData analysts were provided this dataset and instructed to answer the following question: \"How does grass cover influence *Eucalyptus* spp. 
seedling recruitment?\".\n\nResearchers conducted three rounds of surveys at 18 sites across the Goulburn Broken catchment in northern Victoria, Australia in winter and spring 2006 and autumn 2007.\nIn each survey period, a different set of 15 x 15 m quadrats were randomly allocated across each site within 60 m of existing tree canopies.\nThe number of quadrats at each site depended on the size of the site, ranging from four at smaller sites to 11 at larger sites.\nThe total number of quadrats surveyed across all sites and seasons was 351.\nThe number of *Eucalyptus* spp. seedlings was recorded in each quadrat along with information on the GPS location, aspect, tree canopy cover, distance to tree canopy, and position in the landscape.\nGround layer plant species composition was recorded in three 0.5 x 0.5 m sub-quadrats within each quadrat.\nSubjective cover estimates of each species as well as bare ground, litter, rock and moss/lichen/soil crusts were recorded.\nSubsequently, this was augmented with information about the precipitation and solar radiation at each GPS location.\nThe full list of variables included in the dataset is publicly available (), along with the data ().\n\n## **Step 2: Recruitment and Initial Survey of Analysts**\n\nThe lead team (TP, HF, SN, EG, SG, PV, DH, FF) created a publicly available document providing a general description of the project ().\nThe project was advertised at conferences, via Twitter, using mailing lists for ecological societies (including Ecolog, Evoldir, and lists for the Environmental Decisions Group, and Transparency in Ecology and Evolution), and via word of mouth.\nThe target population was active ecology, conservation, or evolutionary biology researchers with a graduate degree (or currently studying for a graduate degree) in a relevant discipline.\nResearchers could choose to work independently or in a small team.\nFor the sake of simplicity, we refer to these as 'analysis teams' though some comprised one 
individual.\nWe aimed for a minimum of 12 analysis teams independently evaluating each dataset (see sample size justification below).\nWe simultaneously recruited volunteers to peer review the analyses conducted by the other volunteers through the same channels.\nOur goal was to recruit a similar number of peer reviewers and analysts, and to ask each peer reviewer to review a minimum of four analyses.\nIf we were unable to recruit at least half the number of reviewers as analysis teams, we planned to ask analysts to serve also as reviewers (after they had completed their analyses), but this was unnecessary.\nTherefore, no data analysts peer reviewed analyses of the dataset they had analyzed.\nAll analysts and reviewers were offered the opportunity to share co-authorship on this manuscript and we planned to invite them to participate in the collaborative process of producing the final manuscript.\nAll analysts signed \\[digitally\\] a consent (ethics) document () approved by the Whitman College Institutional Review Board prior to being allowed to participate.\n\n::: {.callout-note appearance=\"simple\"}\n## Preregistration Deviation:\n\nDue to the large number of recruited analysts and reviewers and the anticipated challenges of receiving and integrating feedback from so many authors, we limited analyst and reviewer participation in the production of the final manuscript to an invitation to call attention to serious problems with the manuscript draft.\n:::\n\nWe identified our minimum number of analysts per dataset by considering the number of effects needed in a meta-analysis to generate an estimate of heterogeneity ($\\tau^{2}$) with a 95$\\%$ confidence interval that does not encompass zero.\nThis minimum sample size is invariant regardless of $\\tau^{2}$.\nThis is because the same t-statistic value will be obtained by the same sample size regardless of variance ($\\tau^{2}$).\nWe see this by first examining the formula for the standard error, $\\text{SE}$ for 
variance, ($\\tau^{2}$) or ($\\text{SE}\\tau^{2}$) assuming normality in an underlying distribution of effect sizes [@knight2000]:\n\n$$\n\\text{SE}({{\\tau}^2})=\\sqrt{\\frac{{2\\tau}^4}{n-1}}\n$$ {#eq-SE-tau}\n\nand then rearranging the above formula to show how the t-statistic is independent of $\\tau^2$, as seen below.\n\n$$\nt=\\frac{{\\tau}^2}{SE({{\\tau}^2})}=\\sqrt{\\frac{n-1}{2}}\n$$ {#eq-t-tau}\n\nWe then find a minimum n = 12 according to this formula.\n\n## **Step 3: Primary Data Analyses**\n\nAnalysis teams registered and answered a demographic and expertise survey ().\nWe then provided them with the dataset of their choice and requested that they answer a specific research question.\nFor the evolutionary ecology dataset that question was \"To what extent is the growth of nestling blue tits (*Cyanistes caeruleus*) influenced by competition with siblings?\" and for the conservation ecology dataset it was \"How does grass cover influence *Eucalyptus* spp. seedling recruitment?\" Once their analysis was complete, they answered a structured survey (), providing analysis technique, explanations of their analytical choices, quantitative results, and a statement describing their conclusions.\nThey also were asked to upload their analysis files (including the dataset as they formatted it for analysis and their analysis code \\[if applicable\\]) and a detailed journal-ready statistical methods section.\n\n::: {.callout-note appearance=\"simple\"}\n## Additional Explanation:\n\nAs is common in many studies in ecology and evolutionary biology, the datasets we provided contained many variables, and the research questions we provided could be addressed by our datasets in many different ways. 
For instance, volunteer analysts had to choose the dependent (response) variable and the independent variable, and make numerous other decisions about which variables and data to use and how to structure their model.\n:::\n\n::: {.callout-note appearance=\"simple\"}\n## Preregistration Deviation:\n\nWe originally planned to have analysts complete a single survey (), but after we evaluated the results of that survey, we realized we would need a second survey () to adequately collect the information we needed to evaluate heterogeneity of results (step 5).\nWe provided a set of detailed instructions with the follow-up survey, and these instructions are publicly available and can be found within the following files (blue tit: , *Eucalyptus*: ).\n:::\n\n## **Step 4: Peer Reviews of Analyses**\n\nAt minimum, each analysis was evaluated by four different reviewers, and each volunteer peer reviewer was randomly assigned methods sections from at least four analyst teams (the exact number varied).\nEach peer reviewer registered and answered a demographic and expertise survey identical to that asked of the analysts, except we did not ask about 'team name' since reviewers did not work in teams.\nReviewers evaluated the methods of each of their assigned analyses one at a time in a sequence determined by the project leaders.\nWe systematically assigned the sequence so that, if possible, each analysis was allocated to each position in the sequence for at least one reviewer.\nFor instance, if each reviewer were assigned four analyses to review, then each analysis would be the first analysis assigned to at least one reviewer, the second analysis assigned to another reviewer, the third analysis assigned to yet another reviewer, and the fourth analysis assigned to a fourth reviewer.\nBalancing the order in which reviewers saw the analyses controls for order effects, e.g. 
a reviewer might be less critical of the first methods section they read than the last.\n\nThe process for a single reviewer was as follows.\nFirst, the reviewer received a description of the methods of a single analysis.\nThis included the narrative methods section, the analysis team's answers to our survey questions regarding their methods, including analysis code, and the dataset.\nThe reviewer was then asked, in an online survey (), to rate that analysis on a scale of 0-100 based on this prompt: \"Rate the overall appropriateness of this analysis to answer the research question (*one of the two research questions inserted here*) with the available data. To help you calibrate your rating, please consider the following guidelines:\n\n>\n- 100. A perfect analysis with no conceivable improvements from the reviewer\n- 75. An imperfect analysis but the needed changes are unlikely to dramatically alter outcomes\n- 50. A flawed analysis likely to produce either an unreliable estimate of the relationship or an over-precise estimate of uncertainty\n- 25. A flawed analysis likely to produce an unreliable estimate of the relationship and an over-precise estimate of uncertainty\n- 0. A dangerously misleading analysis, certain to produce both an estimate that is wrong and a substantially over-precise estimate of uncertainty that places undue confidence in the incorrect estimate.\n>\n\\*Please note that these values are meant to calibrate your ratings.\nWe welcome ratings of any number between 0 and 100.\n\nAfter providing this rating, the reviewer was presented with this prompt, in multiple-choice format: \"Would the analytical methods presented produce an analysis that is (a) publishable as is, (b) publishable with minor revision, (c) publishable with major revision, (d) deeply flawed and unpublishable?\" The reviewer was then provided with a series of text boxes and the following prompts: \"Please explain your ratings of this analysis. 
Please evaluate the choice of statistical analysis type. Please evaluate the process of choosing variables for and structuring the statistical model. Please evaluate the suitability of the variables included in (or excluded from) the statistical model. Please evaluate the suitability of the structure of the statistical model. Please evaluate choices to exclude or not exclude subsets of the data. Please evaluate any choices to transform data (or, if there were no transformations, but you think there should have been, please discuss that choice).\" After submitting this review, a methods section from a second analysis was then made available to the reviewer.\nThis same sequence was followed until all analyses allocated to a given reviewer were provided and reviewed.\nAfter providing the final review, the reviewer was simultaneously provided with all four (or more) methods sections the reviewer had just completed reviewing, the option to revise their original ratings, and a text box to provide an explanation.\nThe invitation to revise the original ratings was as follows: \"If, now that you have seen all the analyses you are reviewing, you wish to revise your ratings of any of these analyses, you may do so now.\" The text box was prefaced with this prompt: \"Please explain your choice to revise (or not to revise) your ratings.\"\n\n::: {.callout-note appearance=\"simple\"}\n## Additional explanation: Unregistered analysis.\n\nTo determine how consistent peer reviewers were in their ratings, we assessed inter-rater reliability among reviewers for both the categorical and quantitative ratings combining blue tit and *Eucalyptus* data using Krippendorff's alpha for ordinal and continuous data respectively.\nThis provides a value that is between -1 (total disagreement between reviewers) and 1 (total agreement between reviewers).\n:::\n\n## **Step 5: Evaluate Variation**\n\nThe lead team conducted the analyses outlined in this section.\nWe described the variation in model 
specification in several ways.\nWe calculated summary statistics describing variation among analyses, including mean, $\\text{SD}$, and range of number of variables per model included as fixed effects, the number of interaction terms, the number of random effects, and the mean, $\\text{SD}$, and range of sample sizes.\nWe also present the number of analyses in which each variable was included.\nWe summarized the variability in standardized effect sizes and predicted values of dependent variables among the individual analyses using standard random effects meta-analytic techniques.\nFirst, we derived standardized effect sizes from each individual analysis.\nWe did this for all linear models or generalized linear models by converting the $t$ value and the degree of freedom ($\\mathit{df}$) associated with regression coefficients (e.g. the effect of the number of siblings \\[predictor\\] on growth \\[response\\] or the effect of grass cover \\[predictor\\] on seedling recruitment \\[response\\]) to the correlation coefficient, $r$, using the following:\n\n$$\nr=\\sqrt{\\frac{{t}^2}{\\left({{t}^2}+\\mathit{df}\\right) }}\n$$ {#eq-t-to-r}\n\nThis formula can only be applied if $t$ and $\\mathit{df}$ values originate from linear or generalized linear models [GLMs; @nakagawa2007].\nIf, instead, linear mixed-effects models (LMMs) or generalized linear mixed-effects models (GLMMs) were used by a given analysis, the exact $\\mathit{df}$ cannot be estimated.\nHowever, adjusted $\\mathit{df}$ can be estimated, for example, using the Satterthwaite approximation of $\\mathit{df}$, $\\mathit{df}_S$, [note that SAS uses this approximation to obtain $\\mathit{df}$ for LMMs and GLMMs; @luke2017].\nFor analyses using either LMMs or GLMMs that do not produce $\\mathit{df}_S$ we planned to obtain $\\mathit{df}_S$ by rerunning the same (G)LMMs using the `lmer()` or `glmer()` function in the *lmerTest* package in R [@kuznetsova2017; @base].\n\n::: {.callout-note appearance=\"simple\"}\n## 
Preregistration Deviation:\n\nRather than re-run these analyses ourselves, we sent a follow-up survey (referenced above under \"Primary data analyses\") to analysts and asked them to follow our instructions for producing this information.\nThe instructions are publicly available and can be found within the following files (blue tit: , *Eucalyptus*: ).\n:::\n\nWe then used the $t$ values and $\\mathit{df}_S$ from the models to obtain $r$ as per the formula above.\nAll $r$ and accompanying $\\mathit{df}$ (or $\\mathit{df}_S$) were converted to Fisher's $Z_r$ \n\n$$\nZ_r = \\frac{1}{2} \\ln(\\dfrac{1+r}{1-r})\n$$ {#eq-Zr}\n\nand its sampling variance; $1/(n – 3)$ where $n = df + 1$.\nAny analyses from which we could not derive a signed $Z_r$, for instance one with a quadratic function in which the slope changed sign, were considered unusable for analyses of $Z_r$ .\nWe expected such analyses would be rare.\nIn fact, most submitted analyses excluded from our meta-analysis of $Z_r$ were excluded because of a lack of sufficient information provided by the analyst team rather than due to the use of effects that could not be converted to $Z_r$.\nRegardless, as we describe below, we generated a second set of standardized effects (predicted values) that could (in principle) be derived from any explanatory model produced by these data.\n\nBesides $Z_r$, which describes the strength of a relationship based on the amount of variation in a dependent variable explained by variation in an independent variable, we also examined differences in the shape of the relationship between the independent and dependent variables.\nTo accomplish this, we derived a point estimate (out-of-sample predicted value) for the dependent variable of interest for each of three values of our primary independent variable.\nWe originally described these three values as associated with the 25th percentile, median, and 75th percentile of the independent variable and any covariates.\n\n::: {.callout-note 
appearance=\"simple\"}\n## Preregistration Deviation:\n\nThe original description of the out-of-sample specifications did not account for the facts that (a) some variables are not distributed in a way that allowed division in percentiles and that (b) variables could be either positively or negatively correlated with the dependent variable.\nWe provide a more thorough description here: We derived three point-estimates (out-of-sample predicted values) for the dependent variable of interest; one for each of three values of our primary independent variable that we specified.\nWe also specified values for all other variables that could have been included as independent variables in analysts' models so that we could derive the predicted values from a fully specified version of any model produced by analysts.\nFor all potential independent variables, we selected three values or categories.\nOf the three we selected, one was associated with small, one with intermediate, and one with large values of one typical dependent variable (day 14 chick weight for the blue tit data and total number of seedlings for the *Eucalyptus* data; analysts could select other variables as their dependent variable, but the others typically correlated with the two identified here).\nFor continuous variables, this means we identified the 25th percentile, median, and 75th percentile and, if the slope of the linear relationship between this variable and the typical dependent variable was positive, we left the quartiles ordered as is.\nIf, instead, the slope was negative, we reversed the order of the independent variable quartiles so that the 'lower' quartile value was the one associated with the lower value for the dependent variable.\nIn the case of categorical variables, we identified categories associated with the 25th percentile, median, and 75th percentile values of the typical dependent variable after averaging the values for each category.\nHowever, for some continuous and categorical 
predictors, we also made selections based on the principle of internal consistency between certain related variables, and we fixed a few categorical variables as identical across all three levels where doing so would simplify the modelling process (specification tables available: blue tit: ; *Eucalyptus*: ).\n:::\n\nWe used the 25th and 75th percentiles rather than minimum and maximum values to reduce the chance of occupying unrealistic parameter space.\nWe planned to derive these predicted values from the model information provided by the individual analysts.\nAll values (predictions) were first transformed to the original scale along with their standard errors ($\\text{SE}$); we used the delta method [@verhoef2012] for the transformation of $\\text{SE}$.\nWe used the square of the $\\text{SE}$ associated with predicted values as the sampling variance in the meta-analyses described below, and we planned to analyze these predicted values in exactly the same ways as we analyzed $Z_r$ in the following analyses.\n\n::: {.callout-note appearance=\"simple\"}\n## Preregistration Deviation:\n\n**1. Standardizing blue tit out-of-sample predictions** $y_i$\n\nBecause analysts of blue tit data chose different dependent variables on different scales, after transforming out-of-sample values to the original scales, we standardized all values as z scores ('standard scores') to put all dependent variables on the same scale and make them comparable. This involved taking each relevant value on the original scale (whether a predicted point estimate or a $\\text{SE}$ associated with that estimate) and subtracting the mean value of that dependent variable derived from the full dataset from the value in question, and then dividing this difference by the standard deviation, $\\text{SD}$, corresponding to the mean from the full dataset (@eq-Z-VZ). 
Thus, all our out-of-sample prediction values from the blue tit data are from a distribution with the mean of 0 and $\\text{SD}$ of 1.\n\nNote that we were unable to standardise some analyst-constructed variables, so these analyses were excluded from the final out-of-sample estimates meta-analysis, see @sec-excluded-yi for details and explanation.\n\n**2. Log-transforming *Eucalyptus* out-of-sample predictions** $y_i$\n\nAll analyses of the *Eucalyptus* data chose dependent variables that were on the same scale, that is, *Eucalyptus* seedling counts. Although analysts may have used different size-classes of *Eucalyptus* seedlings for their dependent variable, we considered these choices to be akin to subsetting, rather than as different response variables, since changing the size-class of the dependent variable ultimately results in observations being omitted or included. Consequently, we did not standardise *Eucalyptus* out-of-sample predictions.\n\nWe were unable to fit quasi-Poisson or Poisson meta-regressions, as desired [@ohara2010], because available meta-analysis packages (e.g. `metafor::` and `metainc::`) do not provide implementation for outcomes as estimates-only, methods are only provided for outcomes as ratios or rate-differences between two groups. Consequently, we log-transformed the out-of-sample predictions for the *Eucalyptus* data and use the mean estimate for each prediction scenario as the dependent variable in our meta-analysis with the associated $\\text{SE}$ as the sampling variance in the meta-analysis [@nakagawa2023, Table 2]. 
\n:::\n\nWe plotted individual effect size estimates ($Z_r$) and predicted values of the dependent variable ($y_i$) and their corresponding 95$\\%$ confidence / credible intervals in forest plots to allow visualization of the range and precision of effect size and predicted values.\nFurther, we included these estimates in random effects meta-analyses [@higgins2003; @borenstein2017] using the *metafor* package in R [@metafor; @base]:\n\n$$\nZ_r \\sim 1 + \\left(1 \\vert \\text{analysisID} \\right)\n$$ {#eq-MA_Zr}\n\n$$ \ny_i \\sim 1 + \\left(1 \\vert \\text{analysisID} \\right)\n$$ {#eq-MA_yi}\n\nwhere $y_i$ is the predicted value for the dependent variable at the 25th percentile, median, or 75th percentile of the independent variables.\nThe individual $Z_r$ effect sizes were weighted with the inverse of sampling variance for $Z_r$.\nThe individual predicted values for dependent variable ($y_i$) were weighted by the inverse of the associated $\\text{SE}^2$ (original registration omitted \"inverse of the\" in error).\nThese analyses provided an average $Z_r$ score or an average $y_i$ with corresponding 95$\\%$ confidence interval and allowed us to estimate two heterogeneity indices, $\\tau^2$ and $I^2$.\nThe former, $\\tau^2$, is the absolute measure of heterogeneity or the between-study variance (in our case, between-effect variance) whereas $I^2$ is a relative measure of heterogeneity.\nWe obtained the estimate of relative heterogeneity ($I^2$) by dividing the between-effect variance by the sum of between-effect and within-effect variance (sampling error variance).\n$I^2$ is thus, in a standard meta-analysis, the proportion of variance that is due to heterogeneity as opposed to sampling error.\nWhen calculating $I^2$, within-study variance is amalgamated across studies to create a \"typical\" within-study variance which serves as the sampling error variance [@higgins2003; @borenstein2017].\nOur goal here was to visualize and quantify the degree of variation among 
analyses in effect size estimates [@nakagawa2007].\nWe did not test for statistical significance.\n\n::: {.callout-note appearance=\"simple\"}\n## Additional explanation:\n\nOur use of $I^{2}$ to quantify heterogeneity violates an important assumption, but this violation does not invalidate our use of $I^{2}$ as a metric of how much heterogeneity can derive from analytical decisions.\nIn standard meta-analysis, the statistic $I^{2}$ quantifies the proportion of variance that is greater than we would expect if differences among estimates were due to sampling error alone [@rosenberg2013].\nHowever, it is clear that this interpretation does not apply to our value of $I^{2}$ because $I^{2}$ assumes that each estimate is based on an independent sample (although these analyses can account for non-independence via hierarchical modelling), whereas all our effects were derived from largely or entirely overlapping subsets of the same dataset.\nDespite this, we believe that $I^{2}$ remains a useful statistic for our purposes.\nThis is because, in calculating $I^{2}$, we are still setting a benchmark of expected variation due to sampling error based on the variance associated with each separate effect size estimate, and we are assessing how much (if at all) the variability among our effect sizes exceeds what would be expected had our effect sizes been based on independent data.\nIn other words, our estimates can tell us how much proportional heterogeneity is possible from analytical decisions alone when sample sizes (and therefore meta-analytic within-estimate variance) are similar to the ones in our analyses.\nAmong other implications, our violation of the independent sample assumption means that we (dramatically) over-estimate the variance expected due to sampling error, and because $I^{2}$ is a proportional estimate, we thus underestimate the actual proportion of variance due to differences among analyses other than sampling error.\nHowever, correcting this underestimation 
would create a trivial value since we designed the study so that much of the variance would derive from analytic decisions as opposed to differences in sampled data.\nInstead, retaining the $I^{2}$ value as typically calculated provides a useful comparison to $I^{2}$ values from typical meta-analyses.\n\nInterpretation of $\\tau^2$ also differs somewhat from traditional meta-analysis, and we discuss this further in the Results.\n:::\n\nFinally, we assessed the extent to which deviations from the meta-analytic mean by individual effect sizes ($Z_r$) or the predicted values of the dependent variable ($y_i$) were explained by the peer rating of each analysis team's method section, by a measurement of the distinctiveness of the set of predictor variables included in each analysis, and by the choice of whether or not to include random effects in the model.\nThe deviation score, which served as the dependent variable in these analyses, is the absolute value of the difference between the meta-analytic mean $\\bar{Z_r}$ (or $\\bar{y_i}$) and the individual $Z_r$ (or $y_i$) estimate for each analysis.\nWe used the Box-Cox transformation on the absolute values of deviation scores to achieve an approximately normal distribution [c.f. @fanelli2013; @fanelli2017].\nWe described variation in this dependent variable with both a series of univariate analyses and a multivariate analysis.\nAll these analyses were general linear (mixed) models.\nThese analyses were secondary to our estimation of variation in effect sizes described above.\nWe wished to quantify relationships among variables, but we had no *a priori* expectation of effect size and made no dichotomous decisions about statistical significance.\n\n::: {#nte-box-weight-deviation .callout-note appearance=\"simple\"}\n## Additional Explanation:\n\nIn our meta-analyses based on Box-Cox transformed deviation scores, we leave these deviation scores unweighted. 
\nThis is consistent with our registration, which did not mention weighting these scores. However, the fact that we did not mention weighting the scores was actually an error: we had intended to weight them, as is standard in meta-analysis, using the inverse variance of the Box-Cox transformed deviation scores [@eq-folded-variance].\nUnfortunately, when we did conduct the weighted analyses, they produced results in which some weighted estimates differed radically from the unweighted estimate because the weights were invalid. \nSuch invalid weights can sometimes occur when the variance (upon which the weights depend) is partly a function of the effect size, as in our Box-Cox transformed deviation scores [@nakagawa2022]. \nIn the case of the *Eucalyptus* analyses, the most extreme outlier was weighted much more heavily (by close to two orders of magnitude) than any other effect sizes because the effect size was, itself, so high. \nTherefore, we made the decision to avoid weighting by inverse variance in all analyses of the Box-Cox transformed deviation scores. 
\nThis was further justified because (a) most analyses have at least some moderately unreliable weights, and (b) the sample sizes were mostly very similar to each other across submitted analyses, and so meta-analytic weights are not particularly important here.\nWe systematically investigated the impact of different weighting schemes and random effects on model convergence and results, see @sec-post-hoc-weights-analysis for more details.\n:::\n\nWhen examining the extent to which reviewer ratings (on a scale from 0 to 100) explained deviation from the average effect (or predicted value), each analysis had been rated by multiple peer reviewers, so for each reviewer score to be included, we include each deviation score in the analysis multiple times.\nTo account for the non-independence of multiple ratings of the same analysis, we planned to include analysis identity as a random effect in our general linear mixed model in the *lme4* package in R [@lme4; @base].\nTo account for potential differences among reviewers in their scoring of analyses, we also planned to include reviewer identity as a random effect:\n\n$$ \n\\begin{alignat*}{2}\n{\\mathrm{DeviationScore}_{j}} &=&& \\mathrm{BoxCox}(|\\mathrm{DeviationFromMean}_{j}|) \\\\\n{\\mathrm{DeviationScore}}_{ij} & \\sim &&\\mathrm{Rating}_{ij} + \\\\\n& &&\\mathrm{ReviewerID}_{i} + \\\\\n& && {\\mathrm{AnalysisID}}_{j} \\\\\n{\\mathrm{ReviewerID}}_i &\\sim &&\\mathcal{N}(0,\\sigma_i^2) \\\\\n{\\mathrm{AnalysisID}}_j &\\sim &&\\mathcal{N}(0,\\sigma_j^2) \\\\\n\\end{alignat*}\n$$ {#eq-deviation-rating}\n\nWhere $\\text{DeviationFromMean}_{j}$ is the deviation from the meta-analytic mean for the $j$th analysis, $\\text{ReviewerID}_{i}$ is the random intercept assigned to each $i$ reviewer, and $\\text{AnalysisID}_{j}$ is the random intercept assigned to each $j$ analysis, both of which are assumed to be normally distributed with a mean of 0 and a variance of $\\sigma^{2}$.\nAbsolute deviation scores were Box-Cox 
transformed using the `step_box_cox()` function from the *timetk* package in R [@timetk; @base].\n\n\n\n\n\n\n\n\n\nWe conducted a similar analysis with the four categories of reviewer ratings ((1) deeply flawed and unpublishable, (2) publishable with major revision, (3) publishable with minor revision, (4) publishable as is) set as ordinal predictors numbered as shown here.\nAs with the analyses above, we planned for these analyses to also include random effects of analysis identity and reviewer identity.\nBoth of these analyses (1: 1-100 ratings as the fixed effect, 2: categorical ratings as the fixed effects) were planned to be conducted eight times for each dataset.\nEach of the four responses ($Z_r$, $y_{25th}$, $y_{50th}$, $y_{75th}$) were to be compared once to the initial ratings provided by the peer reviewers, and again based on the revised ratings provided by the peer reviewers.\n\n::: {.callout-note appearance=\"simple\"}\n## Preregistration Deviation:\n\n1. We planned to include random effects of both analysis identity and reviewer identity in these models comparing reviewer ratings with deviation scores.\nHowever, after we received the analyses, we discovered that a subset of analyst teams had either conducted multiple analyses and/or identified multiple effects per analysis as answering the target question.\nWe therefore faced an even more complex potential set of random effects.\nWe decided that including team ID and effect ID along with reviewer ID as random effects in the same model would almost certainly lead to model fit problems, and so we started with simpler models including just effect ID and reviewer ID.\nHowever, even with this simpler structure, our dataset was sparse, with reviewers rating a small number of analyses, resulting in models with singular fit (@sec-convergence-singularity).\nRemoving one of the random effects was necessary for the models to converge.\nFor both models of deviation from the meta-analytic mean explained by 
categorical or continuous reviewer ratings, we removed the random effect of effect ID, leaving reviewer ID as the only random effect.\n\n2. We conducted analyses only with the final peer ratings after the opportunity for revision, not with the initial ratings.\nThis was because when we recorded the final ratings, the initial ratings were over-written, therefore we did not have access to those initial values.\n:::\n\nThe next set of univariate analyses sought to explain deviations from the mean effects based on a measure of the distinctiveness of the set of variables included in each analysis.\nAs a 'distinctiveness' score, we used Sorensen's Similarity Index (an index typically used to compare species composition across sites), treating variables as species and individual analyses as sites.\nTo generate an individual Sorensen's value for each analysis required calculating the pairwise Sorensen's value for all pairs of analyses (of the same dataset), and then taking the average across these Sorensen's values for each analysis.\nWe calculated the Sorensen's index values using the *betapart* package [@betapart] in R:\n\n$$\n\\beta_{\\mathrm{Sorensen}} = \\frac{b+c}{2a+b+c}\n$$ {#eq-sorensen}\n\nwhere $a$ is the number of variables common to both analyses, $b$ is the number of variables that occur in the first analysis but not in the second and $c$ is the number of variables that occur in the second analysis but not in the first.\nWe then used the per-model average Sorensen's index value as an independent variable to predict the deviation score in a general linear model, and included no random effect since each analysis is included only once, in R [@base]:\n\n$$ \n\\mathrm{DeviationScore}_{j} \\sim \\beta_{\\mathrm{Sorensen}_{j}}\n$$ {#eq-deviation}\n\n::: {.callout-note appearance=\"simple\"}\n## Additional explanation:\n\nWhen we planned this analysis, we anticipated that analysts would identify a single primary effect from each model, so that each model would appear in the analysis only 
once.\nOur expectation was incorrect because some analysts identified \\>1 effect per analysis, but we still chose to specify our model as registered and not use a random effect.\nThis is because most models produced only one effect and so we expected that specifying a random effect to account for the few cases where \\>1 effect was included for a given model would prevent model convergence.\n\nNote that this analysis contrasts with the analyses in which we used reviewer ratings as predictors because in the analyses with reviewer ratings, each effect appeared in the analysis approximately four times due to multiple reviews of each analysis, and so it was much more important to account for that variance through a random effect.\n:::\n\nNext, we assessed the relationship between the inclusion of random effects in the analysis and the deviation from the mean effect size. We anticipated that most analysts would use random effects in a mixed model framework, but if we were wrong, we wanted to evaluate the differences in outcomes when using random effects versus not using random effects. Thus if there were at least 5 analyses that did and 5 analyses that did not include random effects, we would add a binary predictor variable “random effects included (yes/no)” to our set of univariate analyses and would add this predictor variable to our multivariate model described below. This standard was only met for the *Eucalyptus* analyses, and so we only examined inclusion of random effects as a predictor variable in meta-analysis of this set of analyses. 
\n\nFinally, we conducted a multivariate analysis with the five predictors described above (peer ratings 0-100 and peer ratings of publishability 1-4; both original and revised and Sorensen's index, plus a sixth for *Eucalyptus*, presence /absence of random effects) with random effects of analysis identity and reviewer identity in the *lme4* package in R [@lme4; @base].\nWe had stated here in the text that we would use only the revised (final) peer ratings in this analysis, so the absence of the initial ratings is not a deviation from our plan:\n\n$$ \n\\begin{alignat*}{3}\n{\\mathrm{DeviationScore}_{j}} &=&& \\mathrm{BoxCox}(|\\mathrm{DeviationFromMean}_{j}|) \\\\\n{\\mathrm{DeviationScore}}_{ij} &\\sim && {\\mathrm{RatingContinuous}}_{ij} + \\\\\n& && {\\mathrm{RatingCategorical}}_{ij} + \\\\\n& && {\\beta_\\mathrm{Sorensen}}_{j} + \\\\\n& && {\\mathrm{AnalysisID}}_{j} + \\\\\n& && {\\mathrm{ReviewerID}}_{i} \\\\\n{\\mathrm{ReviewerID}}_{i} &\\sim &&\\mathcal{N}(0,\\sigma_i^2) \\\\\n{\\mathrm{AnalysisID}}_{j} &\\sim &&\\mathcal{N}(0,\\sigma_j^2)\n\\end{alignat*}\n$$ {#eq-deviation-multivar}\n\nWe conducted all the analyses described above eight times; for each of the four responses ($Z_r$, $y_{25th}$, $y_{50th}$, $y_{75th}$) one time for each of the two datasets.\n\nWe have publicly archived all relevant data, code, and materials on the Open Science Framework ().\nArchived data includes the original datasets distributed to all analysts, any edited versions of the data analyzed by individual groups, and the data we analyzed with our meta-analyses, which include the effect sizes derived from separate analyses, the statistics describing variation in model structure among analyst groups, and the anonymized answers to our surveys of analysts and peer reviewers.\nSimilarly, we have archived both the analysis code used for each individual analysis (where available) and the code from our meta-analyses.\nWe have also archived copies of our survey instruments from analysts 
and peer reviewers.\n\nOur rules for excluding data from our study were as follows.\nWe excluded from our synthesis any individual analysis submitted after we had completed peer review or those unaccompanied by analysis files that allow us to understand what the analysts did.\nWe also excluded any individual analysis that did not produce an outcome that could be interpreted as an answer to our primary question (as posed above) for the respective dataset.\nFor instance, this means that in the case of the data on blue tit chick growth, we excluded any analysis that did not include something that can be interpreted as growth or size as a dependent (response) variable, and in the case of the *Eucalyptus* establishment data, we excluded any analysis that did not include a measure of grass cover among the independent (predictor) variables.\nAlso, as described above, any analysis that could not produce an effect that could be converted to a signed $Z_r$ was excluded from analyses of $Z_r$.\n\n::: {.callout-note appearance=\"simple\"}\n## Preregistration Deviation:\n\nSome analysts had difficulty implementing our instructions to derive the out-of-sample predictions, and in some cases (especially for the *Eucalyptus* data), they submitted predictions with implausibly extreme values.\nWe believed these values were incorrect and thus made the conservative decision to exclude out-of-sample predictions where the estimates were \\> 3 standard deviations from the mean value from the full dataset provided to teams for analysis.\n:::\n\n::: {.callout-note appearance=\"simple\"}\n## Additional explanation: We conducted several unregistered analyses.\n\n**1. 
Evaluating model fit.**\n\nWe evaluated all fitted models using the `performance::performance()` function from the *performance* package [@performance] and the `glance()` function from the *broom.mixed* package [@broommixed].\nFor all models, we calculated the square root of the residual variance (Sigma) and the root mean squared error (RMSE).\nFor GLMMs `performance::performance()` calculates the marginal and conditional $R^2$ values as well as the contribution of random effects (ICC), based on Nakagawa et al. [-@nakagawa2017].\nThe conditional $R^2$ accounts for both the fixed and random effects, while the marginal $R^2$ considers only the variance of the fixed effects.\nThe contribution of random effects is obtained by subtracting the marginal $R^2$ from the conditional $R^2$.\n\n**2. Exploring outliers and analysis quality.**\n\nAfter seeing the forest plots of $Z_r$ values and noticing the existence of a small number of extreme outliers, especially from the *Eucalyptus* analyses, we wanted to understand the degree to which our heterogeneity estimates were influenced by these outliers.\nTo explore this question, we removed the highest two and lowest two values of $Z_r$ in each dataset and re-calculated our heterogeneity estimates.\n\nTo help understand the possible role of the quality of analyses in driving the heterogeneity we observed among estimates of $Z_r$, we created forest plots and recalculated our heterogeneity estimates after removing all effects from analysis teams that had received at least one rating of \"deeply flawed and unpublishable\" and then again after removing all effects from analysis teams with at least one rating of either \"deeply flawed and unpublishable\" or \"publishable with major revisions\".\nWe also used self-identified levels of statistical expertise to examine heterogeneity when we retained analyses only from analysis teams that contained at least one member who rated themselves as \"highly proficient\" or \"expert\" (rather 
than \"novice\" or \"moderately proficient\") in conducting statistical analyses in their research area in our intake survey.\n\nAdditionally, to assess potential impacts of highly collinear predictor variables on estimates of $Z_r$ in blue tit analyses, we created forest plots (@fig-forest-plot-Zr-collinear-rm-subset) and recalculated our heterogeneity estimates after we removed analyses that contained the brood count after manipulation and the highly correlated (correlation of 0.89, @fig-ggpairs-bt) brood count at day 14. This removal included the one effect based on a model that contained both these variables and a third highly correlated variable, the estimate of number of chicks fledged (the only model that included the estimate of number of chicks fledged). We did not conduct a similar analysis for the *Eucalyptus* dataset because there were no variables highly collinear with the primary predictors (grass cover variables) in that dataset (@fig-ggpairs-eucalyptus). \n\n\n**3. Exploring possible impacts of lower quality estimates of degrees of freedom.**\n\nOur meta-analyses of variation in $Z_r$ required variance estimates derived from estimates of the degrees of freedom in original analyses from which $Z_r$ estimates were derived.\nWhile processing the estimates of degrees of freedom submitted by analysts, we identified a subset of these estimates in which we had lower confidence because two or more effects from the same analysis were submitted with identical degrees of freedom.\nWe therefore conducted a second set of (more conservative) meta-analyses that excluded these $Z_r$ estimates with identical estimates of degrees of freedom and we present these analyses in the supplement.\n:::\n\n::: {.callout-note appearance=\"simple\"}\n## Additional explanation: Best practices in many-analysts research.\n\nAfter we initiated our project, a paper was published outlining best practices in many-analysts studies [@aczel2021].\nAlthough we did not have access to this 
document when we implemented our project, our study complies with these practices nearly completely.\nThe one exception is that although we requested analysis code from analysts, we did not require submission of code.\n:::\n\n## **Step 6: Facilitated Discussion and Collaborative Write-Up of Manuscript**\n\nWe planned for analysts and initiating authors to discuss the limitations, results, and implications of the study and collaborate on writing the final manuscript for review as a stage-2 Registered Report.\n\n::: {.callout-note appearance=\"simple\"}\n## Preregistration Deviation: \n\nAs described above, due to the large number of recruited analysts and reviewers and the anticipated challenges of receiving and integrating feedback from so many authors, we limited analyst and reviewer participation in the production of the final manuscript to an invitation to call attention to serious problems with the manuscript draft.\n:::\n\nWe built an R package, `ManyEcoEvo::` to conduct the analyses described in this study [@ManyEcoEvo], which can be downloaded from [https://github.com/egouldo/ManyEcoEvo/](https://github.com/egouldo/ManyEcoEvo) to reproduce our analyses or replicate the analyses described here using alternate datasets. Data cleaning and preparation of analysis-data, as well as the analysis, is conducted in R [@base] reproducibly using the `targets` package [@targets]. This data and analysis pipeline is stored in the `ManyEcoEvo::` package repository and its outputs are made available to users of the package when the library is loaded. \n\nThe full manuscript, including further analysis and presentation of results is written in Quarto [@AllaireQuarto2024]. The source code to reproduce the manuscript is hosted at [https://github.com/egouldo/ManyAnalysts/](https://github.com/egouldo/ManyAnalysts/), and the rendered version of the source code may be viewed at [https://egouldo.github.io/ManyAnalysts/](https://egouldo.github.io/ManyAnalysts/). 
All R packages and their versions used in the production of the manuscript are listed at @sec-sesion-info.\n\n\n# Results\n\n\n\n\n::: {.cell}\n\n:::\n\n\n\n\n## Summary Statistics\n\nIn total, 173 analyst teams, comprising 246 analysts, contributed 182 usable analyses (compatible with our meta-analyses and provided with all information needed for inclusion) of the two datasets examined in this study which yielded 215 effects.\nAnalysts produced 134 distinct effects that met our criteria for inclusion in at least one of our meta-analyses for the blue tit dataset.\nAnalysts produced 81 distinct effects meeting our criteria for inclusion for the *Eucalyptus* dataset.\nExcluded analyses and effects either did not answer our specified biological questions, were submitted with insufficient information for inclusion in our meta-analyses, or were incompatible with production of our effect size(s).\nWe expected cases of this final scenario (incompatible analyses), for instance we cannot extract a $Z_r$ from random forest models, which is why we analyzed two distinct types of effects, $Z_r$ and out-of-sample.\nSome effects only provided sufficient information for a subset of analyses and were only included in that subset.\nFor both datasets, most submitted analyses incorporated mixed effects.\nSubmitted analyses of the blue tit dataset typically specified normal error and analyses of the *Eucalyptus* dataset typically specified a non-normal error distribution (@tbl-Table1).\n\nFor both datasets, the composition of models varied substantially in regards to the number of fixed and random effects, interaction terms, and the number of data points used, and these patterns differed somewhat between the blue tit and *Eucalyptus* analyses (See @tbl-Table2).\nFocussing on the models included in the $Z_r$ analyses (because this is the larger sample), blue tit models included a similar number of fixed effects on average (mean 5.2 $\\pm$ 2.92 $\\text{SD}$, range: 1 to 19) as 
*Eucalyptus* models (mean 5.01 $\\pm$ 3.83 $\\text{SD}$, range: 1 to 13), but the standard deviation in number of fixed effects was somewhat larger in the *Eucalyptus* models.\nThe average number of interaction terms was much larger for the blue tit models (mean 0.44 $\\pm$ 1.11 $\\text{SD}$, range: 0 to 10) than for the *Eucalyptus* models (mean 0.16 $\\pm$ 0.65 $\\text{SD}$, range: 0 to 5), but still under 0.5 for both, indicating that most models did not contain interaction terms.\nBlue tit models also contained more random effects (mean 3.53 $\\pm$ 2.08 $\\text{SD}$, range: 0 to 10) than *Eucalyptus* models (mean 1.41 $\\pm$ 1.09 $\\text{SD}$, range: 0 to 4).\nThe maximum possible sample size in the blue tit dataset (3720 nestlings) was an order of magnitude larger than the maximum possible in the *Eucalyptus* dataset (351 plots), and the means and standard deviations of the sample size used to derive the effects eligible for our study were also an order of magnitude greater for the blue tit dataset (mean 2611.09 $\\pm$ 937.48 $\\text{SD}$, range: 76 to 76) relative to the *Eucalyptus* models (mean 298.43 $\\pm$ 106.25 $\\text{SD}$, range: 18 to 351).\nHowever, the standard deviation in sample size from the *Eucalyptus* models was heavily influenced by a few cases of dramatic sub-setting (described below).\nApproximately three quarters of *Eucalyptus* models used sample sizes within 3$\\%$ of the maximum.\nIn contrast, fewer than 20$\\%$ of blue tit models relied on sample sizes within 3$\\%$ of the maximum, and approximately 50$\\%$ of blue tit models relied on sample sizes 29$\\%$ or more below the maximum.\n\nAnalysts provided qualitative descriptions of the conclusions of their analyses.\nEach analysis team provided one conclusion per dataset.\nThese conclusions could take into account the results of any formal analyses completed by the team as well as exploratory and visual analyses of the data.\nHere we summarize all qualitative responses, regardless of 
whether we had sufficient information to use the corresponding model results in our quantitative analyses below.\nWe classified these conclusions into the categories summarized below (@tbl-Table4):\n\n- Mixed: some evidence supporting a positive effect, some evidence supporting a negative effect\n- Conclusive negative: negative relationship described without caveat\n- Qualified negative: negative relationship but only in certain circumstances or where analysts express uncertainty in their result\n- Conclusive none: analysts interpret the results as conclusive of no effect\n- None qualified: analysts describe finding no evidence of a relationship but they describe the potential for an undetected effect\n- Qualified positive: positive relationship described but only in certain circumstances or where analysts express uncertainty in their result\n- Conclusive positive: positive relationship described without caveat\n\nFor the blue tit dataset, most analysts concluded that there was negative relationship between measures of sibling competition and nestling growth, though half the teams expressed qualifications or described effects as mixed or absent.\nNo analysts concluded that there was a positive relationship even though some individual effect sizes were positive, apparently because all analysts who produced effects indicating positive relationships also produced effects indicating negative relationships and therefore described their results as qualified, mixed, or absent.\nFor the *Eucalyptus* dataset, there was a broader spread of conclusions with at least one analyst team providing conclusions consistent with each conclusion category.\nThe most common conclusion for the *Eucalyptus* dataset was that there was no relationship between grass cover and *Eucalyptus* recruitment (either conclusive or qualified description of no relationship), but more than half the teams concluded that there were effects; negative, positive, or mixed.\n\n\n\n\n::: {#tbl-Table4 .cell 
.column-page-right tbl-cap='Tallies of analysts\\' qualitative answers to the research questions addressed by their analyses.'}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n
DatasetMixedNegative ConclusiveNegative QualifiedNone ConclusiveNone QualifiedPositive ConclusivePositive Qualified
blue tit537274100
Eucalyptus8612191224
\n
\n```\n:::\n\n\n\n\n## Distribution of Effects\n\n### Standardized Effect Sizes ($Z_r$)\n\n\n\n\n::: {.cell}\n\n:::\n\n::: {.cell}\n\n:::\n\n\n\n\nAlthough the majority (118 of 131) of the usable $Z_r$ effects from the blue tit dataset found nestling growth decreased with sibling competition, and the meta-analytic mean $\bar{Z_r}$ (Fisher's transformation of the correlation coefficient) was convincingly negative (-0.35 $\pm$ 0.06 95$\%$CI), there was substantial variability in the strength and the direction of this effect.\n$Z_r$ ranged from -1.55 to 0.38, and approximately continuously from -0.93 to 0.19 (@fig-forest-plots-Zr-1 and @tbl-effects-params), and of the 118 effects with negative slopes, 93 had confidence intervals excluding 0.\nOf the 13 with positive slopes indicating increased nestling growth in the presence of more siblings, 2 had confidence intervals excluding zero (@fig-forest-plots-Zr-1).\n\nMeta-analysis of the *Eucalyptus* dataset also showed substantial variability in the strength of effects as measured by $Z_r$, and unlike with the blue tits, a notable lack of consistency in the direction of effects (@fig-forest-plots-Zr-2, @tbl-effects-params).\n$Z_r$ ranged from -4.47 (@fig-specr-euc), indicating a strong tendency for reduced *Eucalyptus* seedling success as grass cover increased, to 0.39, indicating the opposite.\nAlthough the range of reported effects skewed strongly negative, this was due to a small number of substantial outliers.\nMost values of $Z_r$ were relatively small with values $\lt |0.2|$ and the meta-analytic mean effect size was close to zero (-0.09 $\pm$ 0.12 95$\%$CI).\nOf the 79 effects, fifty-three had confidence intervals overlapping zero, approximately a quarter (fifteen) crossed the traditional threshold of statistical significance indicating a negative relationship between grass cover and seedling success, and eleven crossed the significance threshold indicating a positive relationship between grass cover and 
seedling success (@fig-forest-plots-Zr-2).\n\n\n\n\n::: {.cell}\n\n:::\n\n::: {#fig-forest-plots-Zr .cell .preview-image .column-page-right layout-nrow=\"2\"}\n::: {.cell-output-display}\n![Blue tit analyses: Points where $Z_r$ are less than 0 indicate analyses that found a negative relationship between sibling number and nestling growth.](index_files/figure-html/fig-forest-plots-Zr-1.png){#fig-forest-plots-Zr-1 width=672}\n:::\n\n::: {.cell-output-display}\n![*Eucalyptus* analyses: Points where $Z_r$ are less than 0 indicate a negative relationship between grass cover and *Eucalyptus* seedling success.](index_files/figure-html/fig-forest-plots-Zr-2.png){#fig-forest-plots-Zr-2 width=672}\n:::\n\nForest plots of meta-analytic estimated standardized effect sizes ($Z_r$, blue circles) and their 95$\\%$confidence intervals for each effect size included in the meta-analysis model. The meta-analytic mean effect size is denoted by a black triangle and a dashed vertical line, with error bars also representing the 95$\\%$confidence interval. The solid black vertical line demarcates effect size of 0, indicating no relationship between the test variable and the response variable. 
Note that the *Eucalyptus* plot omits one extreme outlier with the value of -4.47 (@fig-specr-euc) in order to standardize the x-axes on these two panels.\n:::\n\n\n\n\n### Out-of-sample predictions $y_{i}$\n\n\n\n\n::: {.cell}\n\n:::\n\n\n\n\nAs with the effect size $Z_r$, we observed substantial variability in the size of out-of-sample predictions derived from the analysts' models.\nBlue tit predictions (@fig-forest-plot-bt-yi), which were z-score-standardised to accommodate the use of different response variables, always ranged far in excess of one standard deviation.\nIn the $y_{25}$ scenario, model predictions ranged from -1.84 to 0.42 (a range of 2.68 standard deviations), in the $y_{50}$ they ranged from -0.52 to 1.08 (a range of 1.63 standard deviations), and in the $y_{75}$ scenario they ranged from -0.03 to 1.59 (a range of 1.9 standard deviations).\nAs should be expected given the existence of both negative and positive $Z_r$ values, all three out-of-sample scenarios produced both negative and positive predictions, although as with the $Z_r$ values, there is a clear trend for scenarios with more siblings to be associated with smaller nestlings.\nThis is supported by the meta-analytic means of these three sets of predictions which were -0.66 (95$\\%$CI -0.82--0.5) for the $y_{25}$, 0.34 (95$\\%$CI 0.2-0.48) for the $y_{50}$, and 0.67 (95$\\%$CI 0.57-0.77) for the $y_{75}$.\n\n*Eucalyptus* out-of-sample predictions also varied substantially (@fig-euc-yi-forest-plot), but because they were not z-score-standardised and are instead on the original count scale, the types of interpretations we can make differ.\nThe predicted *Eucalyptus* seedling counts per 15 x 15 m plot for the $y_{25}$ scenario ranged from 0.04 to 26.99, for the $y_{50}$ scenario ranged from 0.04 to 44.34, and for the $y_{75}$ scenario they ranged from 0.03 to 61.34.\nThe meta-analytic mean predictions for these three scenarios were similar; 1.27 (95$\\%$CI 0.59-2.3) for the $y_{25}$, 2.92 
(95$\\%$CI 0.98-3.89) for the $y_{50}$, and 2.92 (95$\\%$CI 1.59-4.9) for the $y_{75}$ scenarios respectively.\n\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n![Forest plot of meta-analytic estimated standardized (z-score) blue tit out-of-sample predictions, $y_i$. Circles represent individual estimates. Triangles represent the meta-analytic mean for each prediction scenario. Dark-blue points correspond to $y_{25}$ scenario, medium-blue points correspond to the $y_{50}$ scenario, while light blue points correspond to the $y_{75}$ scenario. Error bars are 95$\\%$confidence intervals.](index_files/figure-html/fig-forest-plot-bt-yi-1.png){#fig-forest-plot-bt-yi width=672}\n:::\n:::\n\n::: {.cell}\n::: {.cell-output-display}\n![Forest plot of meta-analytic estimated *Eucalyptus* out-of-sample predictions, $y_{i}$, on the response-scale (stems counts). Circles represent individual estimates. Triangles represent the meta-analytic mean for each prediction scenario. Dark-blue points correspond to $y_{25}$ scenario, medium-blue points correspond to the $y_{50}$ scenario, while light blue points correspond to the $y_{75}$ scenario. Error bars are 95% confidence intervals. Outliers (observations more than 3SD above the mean) have been removed prior to model fitting and do not appear on this figure. x-axis is truncated to approx. 40, and thus some error bars are incomplete. 
See @fig-euc-yi-forest-plot-full for full figure.](index_files/figure-html/fig-euc-yi-forest-plot-1.png){#fig-euc-yi-forest-plot width=672}\n:::\n:::\n\n\n\n\n## Quantifying Heterogeneity\n\n### Effect Sizes ($Z_r$)\n\n\n\n\n::: {.cell}\n\n:::\n\n\n\n\nWe quantified both absolute ($\tau^{2}$) and relative ($I^{2}$) heterogeneity resulting from analytical variation.\nBoth measures suggest that substantial variability among effect sizes was attributable to the analytical decisions of analysts.\n\nThe total absolute level of variance beyond what would typically be expected due to sampling error, $\tau^{2}$ (@tbl-effects-heterogeneity), among all usable blue tit effects was 0.08 and for *Eucalyptus* effects was 0.27.\nThis is similar to or exceeding the median value (0.105) of $\tau^{2}$ found across 31 recent meta-analyses [calculated from the data in @yang2023].\nThe similarity of our observed values to values from meta-analyses of different studies based on different data suggests the potential for a large portion of heterogeneity to arise from analytical decisions.\nFor further discussion of interpretation of $\tau^{2}$ in our study, please consult discussion of *post hoc* analyses below.\n\n\n\n\n::: {#tbl-effects-heterogeneity .cell tbl-cap='Heterogeneity in the estimated effects $Z_r$ for meta-analyses of: the full dataset, as well as from post hoc analyses wherein analyses with outliers are removed, analyses with effects from analysis teams with at least one \"unpublishable\" rating are excluded, analyses receiving at least one \"major revisions\" rating or worse excluded, analyses from teams with at least one analyst self-rated as \"highly proficient\" or \"expert\" in statistical analysis are included, and (blue tit only) analyses that did not include the pair of highly collinear predictors together. ${\\tau}_\\text{Team}^{2}$ is the absolute heterogeneity for the random effect `Team`. 
${\\\\tau}_\\\\text{effectID}^{2}$ is the absolute heterogeneity for the random effect `effectID` nested under `Team`. `effectID` is the unique identifier assigned to each individual statistical effect submitted by an analysis team. We nested `effectID` within analysis team identity (`Team`) because analysis teams often submitted >1 statistical effect, either because they considered >1 model or because they derived >1 effect per model, especially when a model contained a factor with multiple levels that produced >1 contrast. ${\\\\tau}_\\\\text{Total}^{2}$ is the total absolute heterogeneity. ${I}_\\\\text{Total}^{2}$ is the proportional heterogeneity; the proportion of the variance among effects not attributable to sampling error, ${I}_\\\\text{Team}^{2}$ is the subset of the proportional heterogeneity due to differences among `Teams` and ${I}_\\\\text{Team, effectID}^{2}$ is subset of the proportional heterogeneity attributable to among-`effectID` differences.'}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n

Dataset

\n

$${N}_\\text{Obs}$$

\n

$${\\tau}_\\text{Total}^{2}$$

\n

$${\\tau}_\\text{Team}^{2}$$

\n

$${\\tau}_\\text{effectID}^{2}$$

\n

$${I^2}_\\text{Total}$$

\n

$${I^2}_\\text{Team}$$

\n

$${I^2}_\\text{Team, effectID}$$

\n
All analyses
Eucalyptus790.270.020.2598.59%6.89%91.70%
blue tit1310.080.030.0597.61%36.71%60.90%
Blue tit analyses containing highly collinear predictors removed
blue tit1170.070.040.0396.92%58.18%38.75%
All analyses, Outliers removed
Eucalyptus750.010.000.0166.19%19.25%46.94%
blue tit1270.070.040.0296.84%64.63%32.21%
Analyses receiving at least one 'Unpublishable' rating removed
Eucalyptus550.010.010.0179.74%28.31%51.43%
blue tit1090.080.030.0597.52%35.68%61.84%
Analyses receiving at least one 'Unpublishable' and or 'Major Revisions' rating removed
Eucalyptus130.030.030.0088.91%88.91%0.00%
blue tit320.140.010.1398.72%5.17%93.55%
Analyses from teams with highly proficient or expert data analysts
Eucalyptus340.580.020.5699.41%3.47%95.94%
blue tit890.090.030.0697.91%31.43%66.49%
\n
\n```\n\n:::\n:::\n\n\n\n\nIn our analyses, $I^{2}$ is a plausible index of how much more variability among effect sizes we have observed, as a proportion, than we would have observed if sampling error were driving variability.\nWe discuss our interpretation of $I^{2}$ further in the methods, but in short, it is a useful metric for comparison to values from published meta-analyses and provides a plausible value for how much heterogeneity could arise in a normal meta-analysis with similar sample sizes due to analytical variability alone.\nIn our study, total $I^{2}$ for the blue tit $Z_r$ estimates was extremely large, at 97.61%, as was the *Eucalyptus* estimate (98.59% @tbl-effects-heterogeneity).\n\nAlthough the overall $I^{2}$ values were similar for both *Eucalyptus* and blue tit analyses, the relative composition of that heterogeneity differed.\nFor both datasets, the majority of heterogeneity in $Z_r$ was driven by differences among effects as opposed to differences among teams, though this was more prominent for the *Eucalyptus* dataset, where nearly all of the total heterogeneity was driven by differences among effects (91.7%) as opposed to differences among teams (6.89%) (@tbl-effects-heterogeneity).\n\n### Out-of-sample predictions ($y_{i}$)\n\n\n\n\n::: {.cell}\n\n:::\n\n\n\n\nWe observed substantial heterogeneity among out-of-sample estimates, but the pattern differed somewhat from the $Z_r$ values (@tbl-yi-heterogeneity).\nAmong the blue tit predictions, $I^{2}$ ranged from medium-high for the $y_{25}$ scenario (68.54) to low (27.9) for the $y_{75}$ scenario.\nAmong the *Eucalyptus* predictions, $I^{2}$ values were uniformly high (\\>82%).\nFor both datasets, most of the existing heterogeneity among predicted values was attributable to among-team differences, with the exception of the $y_{50}$ analysis of the *Eucalyptus* dataset.\nWe are limited in our interpretation of $\\tau^{2}$ for these estimates because, unlike for the $Z_r$ estimates, we have no 
benchmark for comparison with other meta-analyses.\n\n\n\n\n::: {#tbl-yi-heterogeneity .cell tbl-cap='Heterogeneity among the out-of-sample predictions ${y}_{i}$ for both blue tit and *Eucalyptus* datasets. ${\\tau}_\\text{Team}^{2}$ is the absolute heterogeneity for the random effect `Team`. ${\\tau}_\\text{effectID}^{2}$ is the absolute heterogeneity for the random effect `effectID` nested under `Team`. `effectID` is the unique identifier assigned to each individual statistical effect submitted by an analysis team. We nested `effectID` within analysis team identity (`Team`) because analysis teams often submitted >1 statistical effect, either because they considered >1 model or because they derived >1 effect per model, especially when a model contained a factor with multiple levels that produced >1 contrast. ${\\tau}_\\text{Total}^{2}$ is the total absolute heterogeneity. ${I}_\\text{Total}^{2}$ is the proportional heterogeneity; the proportion of the variance among effects not attributable to sampling error, ${I}_\\text{Team}^{2}$ is the subset of the proportional heterogeneity due to differences among `Teams` and ${I}_\\text{Team,effectID}^{2}$ is subset of the proportional heterogeneity attributable to among-`effectID` differences.'}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n

Prediction Scenario

\n

$${N}_\\text{Obs}$$

\n

$${\\tau}_\\text{Total}$$

\n

$${\\tau}_\\text{Team}^{2}$$

\n

$${\\tau}_\\text{effectID}^{2}$$

\n

$${I}_\\text{Total}^{2}$$

\n

$${I}_\\text{Team}^{2}$$

\n

$${I}_{\\text{Team, effectID}}^{2}$$

\n

blue tit

\n
$$y_{25}$$630.230.110.0368.54%53.43%15.11%
$$y_{50}$$600.230.060.0050%46.29%3.71%
$$y_{75}$$630.230.020.0027.9%27.89%0.01%

Eucalyptus

\n
$$y_{25}$$385.751.480.6886.93%59.54%27.39%
$$y_{50}$$385.751.320.8389.63%55%34.64%
$$y_{75}$$385.751.030.4180.19%57.41%22.78%
\n
\n```\n:::\n\n\n\n\n## Post-hoc Analysis: Exploring outlier characteristics and the effect of outlier removal on heterogeneity\n\n### Effect Sizes ($Z_r$)\n\n\n\n\n::: {.cell}\n\n:::\n\n\n\n\nThe outlier *Eucalyptus* $Z_r$ values were striking and merited special examination.\nThe three negative outliers had very low sample sizes and were based on either small subsets of the dataset or, in one case, extreme aggregation of data.\nThe outliers associated with small subsets had sample sizes ($n=$ 117, 90, 18) that were less than half of the total possible sample size of 351.\nThe case of extreme aggregation involved averaging all values within each of the 351 sites in the dataset.\n\n\n\n\n::: {.cell}\n\n:::\n\n\n\n\nSurprisingly, both the largest and smallest effect sizes in the blue tit analyses (@fig-forest-plots-Zr-1) come from the same analyst (anonymous ID: 'Adelong'), with identical models in terms of the explanatory variable structure, but with different response variables.\nHowever, the radical change in effect was primarily due to collinearity with covariates.\nThe primary predictor variable (brood count after manipulation) was accompanied by several collinear variables, including the highly collinear (correlation of 0.89, @fig-ggpairs-bt) covariate (brood count at day 14) in both analyses.\nIn the analysis of nestling weight, brood count after manipulation showed a strong positive partial correlation with weight after controlling for brood count at day 14 and treatment category (increased, decreased, unmanipulated).\nIn that same analysis, the most collinear covariate (the day 14 count) had a negative partial correlation with weight.\nIn the analysis with tarsus length as the response variable, these partial correlations were almost identical in absolute magnitude, but reversed in sign and so brood count after manipulation was now the collinear predictor with the negative relationship.\nThe two models were therefore very similar, but the two collinear predictors 
simply switched roles, presumably because of a subtle difference in the distribution of weight and tarsus length data.\n\nWhen we dropped the *Eucalyptus* outliers, $I^{2}$ decreased from high (98.59 $\%$), using Higgins' [@higgins2003] suggested benchmark, to between moderate and high (66.19 $\%$, @tbl-effects-heterogeneity).\nHowever, more notably, $\tau^2$ dropped from 0.27 to 0.01, indicating that, once outliers were excluded, the observed variation in effects was similar to what we would expect if sampling error were driving the differences among effects (since $\tau^2$ is the variance in addition to that driven by sampling error).\nThe interpretation of this value of $\tau^2$ in the context of our many-analyst study is somewhat different than a typical meta-analysis, however, since in our study (especially for *Eucalyptus*, where most analyses used almost exactly the same data points), there is almost no role for sampling error in driving the observed differences among the estimates.\nThus, rather than concluding that the variability we observed among estimates (after removing outliers) was due only to sampling error [because $\tau^2$ became small: 10$\%$ of the median from @yang2023], we instead conclude that the observed variability, which must be due to the divergent choices of analysts rather than sampling error, is approximately of the same magnitude as what we would have expected if, instead, sampling error, and not analytical heterogeneity, were at work.\nPresumably, if sampling error had actually also been at work, it would have acted as an additional source of variability and would have led total variability among estimates to be higher.\nWith total variability higher and thus greater than expected due to sampling error alone, $\tau^2$ would have been noticeably larger.\nConversely, dropping outliers from the set of blue tit effects did not meaningfully reduce $I^{2}$, and only modestly reduced $\tau^2$ (@tbl-effects-heterogeneity).\nThus, 
effects at the extremes of the distribution were much stronger contributors to total heterogeneity for effects from analyses of the *Eucalyptus* than for the blue tit dataset.\n\n\n\n\n::: {#tbl-effects-params .cell tbl-cap='Estimated mean value of the standardised correlation coefficient, $Z_r$, along with its standard error and 95$\\\\%$confidence intervals. We re-computed the meta-analysis for different post-hoc subsets of the data: All eligible effects, removal of effects from blue tit analyses that contained a pair of highly collinear predictor variables, removal of effects from analysis teams that received at least one peer rating of \"deeply flawed and unpublishable\", removal of any effects from analysis teams that received at least one peer rating of either \"deeply flawed and unpublishable\" or \"publishable with major revisions\", inclusion of only effects from analysis teams that included at least one member who rated themselves as \"highly proficient\" or \"expert\" at conducting statistical analyses in their research area.'}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n
Dataset

$$\\hat\\mu$$

\n

$$\\text{SE}[\\hat\\mu]$$

\n

95%CI

\n
statistic

p

\n
All analyses
Eucalyptus−0.090.06[−0.22,0.03]−1.470.14
blue tit−0.350.03[−0.41,−0.29]−11.02<0.001
Blue tit analyses containing highly collinear predictors removed
blue tit−0.360.03[−0.42,−0.29]−10.97<0.001
All analyses, outliers removed
Eucalyptus−0.030.01[−0.06,0.00]−2.230.026
blue tit−0.360.03[−0.42,−0.30]−11.48<0.001
Analyses receiving at least one 'Unpublishable' rating removed
Eucalyptus−0.020.02[−0.07,0.02]−1.150.3
blue tit−0.360.03[−0.43,−0.30]−10.82<0.001
Analyses receiving at least one 'Unpublishable' and or 'Major Revisions' rating removed
Eucalyptus−0.040.05[−0.15,0.07]−0.770.4
blue tit−0.370.07[−0.51,−0.23]−5.34<0.001
Analyses from teams with highly proficient or expert data analysts
Eucalyptus−0.170.13[−0.43,0.10]−1.240.2
blue tit−0.360.04[−0.44,−0.28]−8.93<0.001
\n
\n```\n\n:::\n:::\n\n\n\n\n### Out-of-sample predictions ($y_{i}$)\n\nWe did not conduct these *post hoc* analyses on the out-of-sample predictions as the number of eligible effects was smaller and the pattern of outliers differed.\n\n## Post-hoc analysis: Exploring the effect of removing analyses with poor peer ratings on heterogeneity\n\n### Effect Sizes ($Z_r$)\n\nRemoving poorly rated analyses had limited impact on the meta-analytic means (@fig-all-forest-plots-Zr).\nFor the *Eucalyptus* dataset, the meta-analytic mean shifted from -0.09 to -0.02 when effects from analyses rated as unpublishable were removed, and to -0.04 when effects from analyses rated, at least once, as unpublishable or requiring major revisions were removed.\nFurther, the confidence intervals for all of these means overlapped each of the other means (@tbl-effects-params).\nWe saw similar patterns for the blue tit dataset, with only small shifts in the meta-analytic mean, and confidence intervals of all three means overlapping each of the other means (@tbl-effects-params).\nRefitting the meta-analysis with a fixed effect for categorical ratings also showed no indication of differences in group meta-analytic means due to peer ratings (@fig-euc-cat-ratings-MA).\n\nFor the blue tit dataset, removing poorly-rated analyses led to only negligible changes in ${I}_\text{Total}^{2}$ and relatively minor impacts on $\tau^{2}$.\nHowever, for the *Eucalyptus* dataset, removing poorly-rated analyses led to notable reductions in ${I}_\text{Total}^{2}$ and substantial reductions in $\tau^{2}$.\nWhen including all analyses, the *Eucalyptus* ${I}_\text{Total}^{2}$ was 98.59% and $\tau^{2}$ was 0.27, but eliminating analyses with ratings of \"unpublishable\" reduced ${I}_\text{Total}^{2}$ to 79.74% and $\tau^{2}$ to 0.01, and removing also those analyses \"needing major revisions\" left ${I}_\text{Total}^{2}$ at 88.91% and $\tau^{2}$ at 0.03 (@tbl-effects-heterogeneity).\nAdditionally, the allocations of 
$I^{2}$ to the team versus individual effect were altered for both blue tit and *Eucalyptus* meta-analyses by removing poorly rated analyses, but in different ways.\nFor blue tit meta-analysis, between a third and two-thirds of the total $I^{2}$ was attributable to among-team variance in most analyses until both analyses rated \"unpublishable\" and analyses rated in need of \"major revision\" were eliminated, in which case almost all remaining heterogeneity was attributable to among-effect differences.\nIn contrast, for *Eucalyptus* meta-analysis, the among-team component of $I^{2}$ was less than a third until both analyses rated \"unpublishable\" and analyses rated in need of \"major revision\" were eliminated, in which case almost 90$\%$ of heterogeneity was attributable to differences among teams.\n\n### Out-of-sample predictions $y_{i}$\n\nWe did not conduct these *post hoc* analyses on the out-of-sample predictions as the number of eligible effects was smaller and our ability to interpret heterogeneity values for these analyses was limited.\n\n## Post-hoc analysis: Exploring the effect of including only analyses conducted by analysis teams with at least one member self-rated as \"highly proficient\" or \"expert\" in conducting statistical analyses in their research area\n\n### Effect Sizes ($Z_r$)\n\nIncluding only analyses conducted by teams that contained at least one member who rated themselves as \"highly proficient\" or \"expert\" in conducting the relevant statistical methods had negligible impacts on the meta-analytic means (@tbl-effects-params), the distribution of $Z_r$ effects (@fig-forest-plot-expertise), or heterogeneity estimates (@tbl-effects-heterogeneity), which remained extremely high.\n\n### Out-of-sample predictions $y_{i}$\n\nWe did not conduct these *post hoc* analyses on the out-of-sample predictions as the number of eligible effects was smaller.\n\n## Post-hoc analysis: Exploring the effect of excluding estimates of $Z_r$ in which we had 
reduced confidence\n\nAs described in our addendum to the methods, we identified a subset of estimates of $Z_r$ in which we had less confidence because of features of the submitted degrees of freedom.\nExcluding these effects in which we had lower confidence had minimal impact on the meta-analytic mean and the estimates of total $I^{2}$ and $\\tau^{2}$ for both blue tit and *Eucalyptus* meta-analyses, regardless of whether outliers were also excluded (@tbl-Zr-exclusion-subsetting).\n\n## Post-hoc analysis: Exploring the effect of excluding effects from blue tit models that contained two highly collinear predictors\n\n### Effect Sizes ($Z_r$)\n\nExcluding effects from blue tit models that contained the two highly collinear predictors (brood count after manipulation and brood count at day 14) had negligible impacts on the meta-analytic means (@tbl-effects-params), the distribution of $Z_r$ effects (@fig-forest-plot-Zr-collinear-rm-subset), or heterogeneity estimates (@tbl-effects-heterogeneity), which remained high.\n\n### Out-of-sample predictions $y_{i}$\n\nInclusion of collinear predictors does not harm model prediction, and so we did not conduct these *post-hoc* analyses.\n\n## Explaining Variation in Deviation Scores\n\nNone of the pre-registered predictors explained substantial variation in deviation among submitted statistical effects from the meta-analytic mean (@tbl-model-summary-stats-ratings-cont, @tbl-deviation-rating-estimates).\n\n\n\n\n::: {#tbl-model-summary-stats-ratings-cont .cell tbl-cap='Summary metrics for registered models seeking to explain deviation (Box-Cox transformed absolute deviation scores) from the mean $Z_r$ as a function of Sorensen\\'s Index, categorical peer ratings, and continuous peer ratings for blue tit and *Eucalyptus* analyses, and as a function of the presence or absence of random effects (in the analyst\\'s models) for *Eucalyptus* analyses. 
We report coefficient of determination, $R^2$, for our models including only fixed effects as predictors of deviation, and we report $R^{2}_\\text{Conditional}$, $R^{2}_\\text{Marginal}$ and the intra-class correlation (ICC) from our models that included both fixed and random effects. For all our models, we calculated the residual standard deviation $\\sigma$ and root mean squared error (RMSE).'}\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n \n \n
Dataset

$$N_\\text{Obs.}$$

\n

$$R^2$$

\n

$${R}_\\text{Conditional}^{2}$$

\n

$${R}_\\text{Marginal}^{2}$$

\n
ICC

$$\\sigma$$

\n
RMSE
Deviation explained by categorical ratings
Eucalyptus346
0.130.010.121.061.02
blue tit473
0.097.47 × 10−30.080.50.48
Deviation explained by continuous ratings
Eucalyptus346
0.127.44 × 10−30.111.061.03
blue tit473
0.093.44 × 10−30.090.50.48
Deviation explained by Sorensen's index
Eucalyptus791.84 × 10−4


1.121.1
blue tit1316.32 × 10−3


0.510.51
Deviation explained by inclusion of random effects
Eucalyptus798.75 × 10−8


1.121.1
\n
\n```\n\n:::\n:::\n\n::: {#tbl-deviation-rating-estimates .cell tbl-cap='Parameter estimates from models of Box-Cox transformed deviation scores from the mean $Z_r$ as a function of continuous and categorical peer ratings, Sorensen scores, and the inclusion of random effects. Standard Errors (SE), 95% confidence intervals (95% CI) are reported for all estimates, while t values, degrees of freedom and p-values are presented for fixed-effects. Note that positive parameter estimates mean that as the predictor variable increases, so does the absolute value of the deviation from the meta-analytic mean.'}\n::: {.cell-output-display}\n\n```{=html}\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n
ParameterRandom EffectCoefficientSE

95%CI

\n
tdf

p

\n

Deviation explained by inclusion of random effects - Eucalyptus

\n
(Intercept)
-2.530.27[-3.06, -1.99] -9.3177<0.001
Mixed model
0.000.31[-0.60, 0.60] 0.0077>0.9

Deviation explained by Sorensen’s index - Eucalyptus

\n
(Intercept)
-2.651.05[-4.70, -0.60] -2.53770.011
Mean Sorensen's index
0.181.51[-2.78, 3.14] 0.1277>0.9

Deviation explained by Sorensen’s index - blue tit

\n
(Intercept)
-1.530.28[-2.08, -0.98] -5.42129<0.001
Mean Sorensen's index
0.420.47[-0.49, 1.34] 0.911290.4

Deviation explained by continuous ratings - Eucalyptus

\n
(Intercept)-2.230.23[-2.69, -1.78] -9.65342<0.001
RateAnalysis-0.0040[-0.011, 0]-1.443420.15
SD (Intercept)Reviewer ID 0.370.09[ 0.24, 0.60]


SD (Observations)Residual1.060.04[0.98, 1.15]


Deviation explained by continuous ratings - blue tit

\n
(Intercept)-1.160.11[-1.37, -0.94]-10.60469<0.001
RateAnalysis-0.0020[-0.004, 0]-1.224690.2
SD (Intercept)Reviewer ID 0.160.03[ 0.10, 0.24]


SD (Observations)Residual0.50.02[0.46, 0.53]


Deviation explained by categorical ratings - Eucalyptus

\n
(Intercept)-2.660.27[-3.18, -2.13] -9.97340<0.001
Publishable with major revision 0.290.29[-0.27, 0.85] 1.023400.3
Publishable with minor revision 0.010.28[-0.54, 0.56] 0.04340>0.9
Publishable as is 0.050.31[-0.55, 0.66] 0.173400.9
SD (Intercept)Reviewer ID 0.390.09[ 0.25, 0.61]


SD (Observations)Residual1.060.04[0.98, 1.15]


Deviation explained by categorical ratings - blue tit

\n
(Intercept)-1.110.11[-1.33, -0.89] -9.91467<0.001
Publishable with major revision-0.190.12[-0.42, 0.04] -1.624670.10
Publishable with minor revision-0.190.12[-0.42, 0.04] -1.654670.10
Publishable as is-0.130.13[-0.39, 0.12] -1.024670.3
SD (Intercept)Reviewer ID 0.150.04[ 0.10, 0.24]


SD (Observations)Residual0.50.02[0.46, 0.53]


\n
\n```\n\n:::\n:::\n\n\n\n\n## Deviation Scores as explained by Reviewer Ratings\n\n### Effect Sizes ($Z_r$)\n\n\n\n\n::: {.cell}\n\n:::\n\n\n\n\nWe obtained reviews from 153 reviewers who reviewed analyses for a mean of 3.27 (range 1 - 11) analysis teams.\nAnalyses of the blue tit dataset received a total of 240 reviews, and each analysis was reviewed by a mean of 3.87 ($\\text{SD}$ 0.71, range 3-5) reviewers.\nAnalyses of the *Eucalyptus* dataset received a total of 178 reviews, and each analysis was reviewed by a mean of 4.24 ($\\text{SD}$ 0.79, range 3-6) reviewers.\nWe tested for inter-rater-reliability to examine how similarly reviewers reviewed each analysis and found approximately no agreement among reviewers.\nWhen considering continuous ratings, IRR was 0.01, and for categorical ratings, IRR was -0.14.\n\nMany of the models of deviation as a function of peer ratings faced issues of failure to converge or singularity due to sparse design matrices with our pre-registered random effects (`Effect_Id` and `Reviewer_ID`) ([see supplementary material -@tbl-explore-Zr-deviation-random-effects-structure]).\nThese issues persisted after increasing the tolerance and changing the optimizer.\nFor both *Eucalyptus* and blue tit datasets, models with continuous ratings as a predictor were singular when both pre-registered random effects were included.\n\nWhen using only categorical ratings as predictors, models converged only when specifying reviewer ID as a random effect.\nThat model had a ${R}_{C}^2$ of 0.09 and a ${R}_{M}^2$ of 0.01.\nThe model using the continuous ratings converged for either random effect in isolation, but not for both together.\nWe present results for the model using study ID as a random effect because we expected it would be a more important driver of variation in deviation scores.\nThat model had a ${R}_{C}^2$ of 0.09 and a ${R}_{M}^2$ of 0.01 for the blue tit dataset and a ${R}_{C}^2$ of 0.12 and a ${R}_{M}^2$ of 0.01 for the *Eucalyptus* dataset.\nNeither continuous nor categorical 
reviewer ratings of the analyses meaningfully predicted deviation from the meta-analytic mean (@tbl-deviation-rating-estimates, @fig-cat-peer-rating).\nWe re-ran the multi-level meta-analysis with a fixed-effect for the categorical publishability ratings and found no difference in mean standardised effect sizes among publishability ratings (@fig-euc-cat-ratings-MA).\n\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n![Violin plot of Box-Cox transformed deviation from meta-analytic mean $Z_r$ as a function of categorical peer rating. Grey points for each rating group denote model-estimated marginal mean deviation, and error bars denote 95$\\%$CI of the estimate. **A** Blue tit dataset, **B** *Eucalyptus* dataset.](index_files/figure-html/fig-cat-peer-rating-1.png){#fig-cat-peer-rating width=960}\n:::\n:::\n\n\n\n\n### Out-of-sample predictions ($y_{i}$)\n\nSome models of the influence of reviewer ratings on out-of-sample predictions ($y_{i}$) had issues with convergence and singularity of fit (see @tbl-yi-deviation-ratings-convergence-singularity) and those models that converged and were not singular showed no strong relationship (@fig-yi-deviation-cat-rating, @fig-yi-deviation-cont-rating), as with the $Z_r$ analyses.\n\n## Deviation scores as explained by the distinctiveness of variables in each analysis\n\n### Effect Sizes ($Z_r$)\n\nWe employed Sorensen's index to calculate the distinctiveness of the set of predictor variables used in each model (@fig-sorensen-plots).\nThe mean Sorensen's score for blue tit analyses was 0.59 ($\\text{SD}$: 0.1, range 0.43-0.86), and for *Eucalyptus* analyses was 0.69 ($\\text{SD}$: 0.08, range 0.55-0.98).\n\nWe found no meaningful relationship between distinctiveness of variables selected and deviation from the meta-analytic mean (@tbl-deviation-rating-estimates, @fig-sorensen-plots) for either blue tit (mean 0.42, 95$\\%$CI -0.49,1.34) or *Eucalyptus* effects (mean 0.18, 95$\\%$CI -2.78,3.14).\n\n\n\n\n::: {#fig-sorensen-plots 
.cell layout-nrow=\"2\"}\n::: {.cell-output-display}\n![Blue tit](index_files/figure-html/fig-sorensen-plots-1.png){#fig-sorensen-plots-1 width=672}\n:::\n\n::: {.cell-output-display}\n![*Eucalyptus*](index_files/figure-html/fig-sorensen-plots-2.png){#fig-sorensen-plots-2 width=672}\n:::\n\nFitted model of the Box-Cox-transformed deviation score (deviation in effect size from meta-analytic mean) as a function of the mean Sorensen's index showing distinctiveness of the set of predictor variables. Grey ribbons on predicted values are 95$\\%$CI's.\n:::\n\n\n\n\n### Out-of-sample predictions ($y_{i}$)\n\nAs with the $Z_r$ estimates, we did not observe any convincing relationships between deviation scores of out-of-sample predictions and Sorensen's index values.\nPlease see [supplementary material -@sec-sorensen-yi].\n\n## Deviation scores as explained by the inclusion of random effects\n\n### Effect Sizes ($Z_r$)\n\nThere were only three blue tit analyses that did not include random effects, which is below the pre-registered threshold for fitting a model of the Box-Cox transformed deviation from the meta-analytic mean as a function of whether the analysis included random-effects.\nHowever, 17 *Eucalyptus* analyses included only fixed effects, which crossed our pre-registered threshold.\nConsequently, we performed this analysis for the *Eucalyptus* dataset only.\nThere was no relationship between random-effect inclusion and deviation from meta-analytic mean among the *Eucalyptus* analyses (@tbl-deviation-rating-estimates, @fig-mixed-effect-marginal-means-plot).\n\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n![Violin plot of mean Box-Cox transformed deviation from meta-analytic mean as a function of random-effects inclusion in *Eucalyptus* analyses. White point for each group of analyses denotes model-estimated marginal mean deviation, and error bars denote 95% CI of the estimate. 
](index_files/figure-html/fig-mixed-effect-marginal-means-plot-1.png){#fig-mixed-effect-marginal-means-plot width=672}\n:::\n:::\n\n\n\n\n### Out-of-sample predictions ($y_{i}$)\n\nAs with the $Z_r$ estimates, we did not examine the possibility of a relationship between the inclusion of random effects and the deviation scores of the blue tit out-of-sample predictions.\nWhen we examined the possibility of this relationship for the *Eucalyptus* effects, we found consistent evidence of somewhat higher Box-Cox-transformed deviation values for models including a random effect, meaning the models including random effects averaged slightly higher deviation from the meta-analytic means (@fig-yi-euc-deviation-RE-plots).\n\n## Multivariate Analysis Effect size ($Z_r$) and Out-of-sample predictions ($y_{i}$)\n\nLike the univariate models, the multivariate models did a poor job of explaining deviations from the meta-analytic mean.\nBecause we pre-registered a multivariate model that contained collinear predictors that produce results which are not readily interpretable, we present these models in the supplement.\nWe also had difficulty with convergence and singularity for multivariate models of out-of-sample ($y_i$) results, and had to adjust which random effects we included (@tbl-yi-multivar-singularity-convergence).\nHowever, no multivariate analyses of *Eucalyptus* out-of-sample results avoided problems of convergence or singularity, no matter which random effects we included (@tbl-yi-multivar-singularity-convergence).\nWe therefore present no multivariate *Eucalyptus* $y_i$ models.\nWe present parameter estimates from multivariate $Z_r$ models for both datasets (@tbl-multivariate-models-coefs, @tbl-multivariate-models-mod-summary) and from $y_i$ models from the blue tit dataset (@tbl-BT-yi-multivar-summary, @tbl-BT-yi-multivar-params).\nWe include interpretation of the results from these models in the supplement, but the results do not change the interpretations we present 
above based on the univariate analyses.\n\n# Discussion\n\nWhen a large pool of ecologists and evolutionary biologists analyzed the same two datasets to answer the corresponding two research questions, they produced substantially heterogeneous sets of answers.\nAlthough the variability in analytical outcomes was high for both datasets, the patterns of this variability differed distinctly between them.\nFor the blue tit dataset, there was nearly continuous variability across a wide range of $Z_r$ values.\nIn contrast, for the *Eucalyptus* dataset, there was less variability across most of the range, but more striking outliers at the tails.\nAmong out-of-sample predictions, there was again almost continuous variation across a wide range (2 $\\text{SD}$) among blue tit estimates.\nFor *Eucalyptus*, out-of-sample predictions were also notably variable, with about half the predicted stem count values at \\<2 but the other half being much larger, and ranging to nearly 40 stems per 15 m x 15 m plot.\nWe investigated several hypotheses for drivers of this variability within datasets, but found little support for any of these.\nMost notably, even when we excluded analyses that had received one or more poor peer reviews, the heterogeneity in results largely persisted.\nRegardless of what drives the variability, the existence of such dramatically heterogeneous results when ecologists and evolutionary biologists seek to answer the same questions with the same data should trigger conversations about how ecologists and evolutionary biologists analyze data and interpret the results of their own analyses and those of others in the literature [e.g., @silberzahn2018; @simonsohn2020; @auspurg2021; @breznau2022].\n\nOur observation of substantial heterogeneity due to analytical decisions is consistent with a small earlier study in ecology [@stanton-geddes2014] and a growing body of work from the quantitative social sciences [e.g., @silberzahn2018; @botvinik-nezer2020; 
@huntington-klein2021; @schweinsberg2021; @breznau2022; @coretta2023].\nIn these studies, when volunteers from the discipline analyzed the same data, they produced a worryingly diverse set of answers to a pre-set question.\nThis diversity included a wide range of effect sizes, and in most cases, even involved effects in opposite directions.\nThus, our result should not be viewed as an anomalous outcome from two particular datasets, but instead as evidence from additional disciplines regarding the heterogeneity that can emerge from analyses of complex datasets to answer questions in probabilistic science.\nNot only is our major observation consistent with other studies, it is, itself, robust because it derived primarily from simple forest plots that we produced based on a small set of decisions that were mostly registered before data gathering and which conform to widely accepted meta-analytic practices.\n\nUnlike the strong pattern we observed in the forest plots, our other analyses, both registered and *post hoc*, produced either inconsistent patterns, weak patterns, or the absence of patterns.\nOur registered analyses found that deviations from the meta-analytic mean by individual effect sizes ($\\bar{Z_r}$) or the predicted values of the dependent variable ($\\bar{y}$) were poorly explained by our hypothesized predictors: peer rating of each analysis team's method section, a measurement of the distinctiveness of the set of predictor variables included in each analysis, or whether the model included random effects.\nHowever, in our *post hoc* analyses, we found that dropping analyses identified as unpublishable or in need of major revision by at least one reviewer modestly reduced the observed heterogeneity among the $Z_r$ outcomes, but only for *Eucalyptus* analyses, apparently because this led to the dropping of the major outlier.\nThis limited role for peer review in explaining the variability in our results should be interpreted cautiously because the 
inter-rater reliability among peer reviewers was extremely low, and at least some analyses that appeared flawed to us were not marked as flawed by reviewers. \nThus it seems that the peer reviews we received were of mixed quality, possibly due to lack of expertise or lack of care on the part of some reviewers.\nHowever, the hypothesis that poor quality analyses drove a substantial portion of the heterogeneity we observed was also contradicted by our observation that analysts' self-declared statistical expertise appeared unrelated to heterogeneity.\nWhen we retained only analyses from teams including at least one member with high self-declared levels of expertise, heterogeneity among effect sizes remained high.\nThus, our results suggest lack of statistical expertise is not the primary factor responsible for the heterogeneity we observed, although further work is merited before rejecting a role for statistical expertise.\nBesides variability in expertise, it is also possible that the volunteer analysts varied in the effort they invested, and low effort presumably drove at least some heterogeneity in results. However, analysts often submitted thoughtful and extensive code, tables, figures, and textual explanation and interpretations, which is evidence of substantial investment.\nFurther, we are confident that low effort alone is an insufficient explanation for the heterogeneity we observed because we have worked with these datasets ourselves, and we know from experience that there are countless plausible modeling alternatives that can produce a diversity of effects. 
\nAdditionally, heterogeneity in analytical outcomes differed notably between datasets, and there is no reason to expect that one set of analysts took this project less seriously than the other.\nReturning to our exploratory analyses, not surprisingly, simply dropping outlier values of $Z_r$ for *Eucalyptus* analyses, which had more extreme outliers, led to less observable heterogeneity in the forest plots, and also reductions in our quantitative measures of heterogeneity.\nWe did not observe a similar effect in the blue tit dataset because that dataset had outliers that were much less extreme and instead had more variability across the core of the distribution.\n\nOur major observations raise two broad questions: why was the variability among results so high, and why did the pattern of variability differ between our two datasets?\nOne important and plausible answer to the first question is that much of the heterogeneity derives from the lack of a precise relationship between the two biological research questions we posed and the data we provided.\nThis lack of a precise relationship between data and question creates many opportunities for different model specifications, and so may inevitably lead to varied analytical outcomes [@auspurg2021].\nHowever, we believe that the research questions we posed are consistent with the kinds of research question that ecologists and evolutionary biologists typically work from.\nWhen designing the two biological research questions, we deliberately sought to represent the level of specificity we typically see in these disciplines.\nThis level of specificity is evident when we look at the research questions posed by some recent meta-analyses in these fields:\n\n- \"how \\[does\\] urbanisation impact mean phenotypic values and phenotypic variation ... 
\\[in\\] paired urban and non-urban comparisons of avian life-history traits\" [@capilla-lasheras2022]\n\n- \"\\[what are\\] the effects of ocean acidification on the crustacean exoskeleton, assessing both exoskeletal ion content (calcium and magnesium) and functional properties (biomechanical resistance and cuticle thickness)\" [@siegel2022]\n\n- \"\\[what is\\] the extent to which restoration affects both the mean and variability of biodiversity outcomes ... \\[in\\] terrestrial restoration\" [@atkinson2022]\n\n- \"\\[does\\] drought stress \\[have\\] a negative, positive, or null effect on aphid fitness\" [@leybourne2021]\n\n- \"\\[what is\\] the influence of nitrogen-fixing trees on soil nitrous oxide emissions\" [@kou-giesbrecht2021]\n\nThere is not a single precise answer to any of these questions, nor to the questions we posed to analysts in our study.\nAnd this lack of single clear answers will obviously continue to cause uncertainty since ecologists and evolutionary biologists conceive of the different answers from the different statistical models as all being answers to the same general question.\nA possible response would be a call to avoid these general questions in favor of much more precise alternatives [@auspurg2021].\nHowever, the research community rewards researchers who pose broad questions [@simons2017], and so researchers are unlikely to narrow their scope without a change in incentives.\nFurther, we suspect that even if individual studies specified narrow research questions, other scientists would group these more narrow questions into broader categories, for instance in meta-analyses, because it is these broader and more general questions that often interest the research community.\n\nAlthough variability in statistical outcomes among analysts may be inevitable, our results raise questions about why this variability differed between our two datasets.\nWe are particularly interested in the differences in the distribution of $Z_r$ since the 
distributions of out-of-sample predictions were on different scales for the two datasets, thus limiting the value of comparisons.\nThe forest plots of $Z_r$ from our two datasets showed distinct patterns, and these differences are consistent with several alternative hypotheses.\nThe results submitted by analysts of the *Eucalyptus* dataset showed a small average (close to zero) with most estimates also close to zero (± 0.2), though about a third far enough above or below zero to cross the traditional threshold of statistical significance.\nThere were a small number of striking outliers that were very far from zero.\nIn contrast, the results submitted by analysts of the blue tit dataset showed an average much further from zero (- 0.35) and a much greater spread in the core distribution of estimates across the range of $Z_r$ values (± 0.5 from the mean), with few modest outliers.\nSo, why was there more spread in effect sizes (across the estimates that are not outliers) in the blue tit analyses relative to the *Eucalyptus* analyses?\n\nOne possible explanation for the lower heterogeneity among most *Eucalyptus* $Z_r$ effects is that weak relationships may limit the opportunities for heterogeneity in analytical outcome.\nSome evidence for this idea comes from two sets of \"many labs\" studies in psychology [@klein2014; @klein2018].\nIn these studies, many independent lab groups each replicated a large set of studies, including, for each study, the experiment, data collection, and statistical analyses.\nThese studies showed that, when the meta-analytic mean across the replications from different labs was small, there was much less heterogeneity among the outcomes than when the mean effect sizes were large [@klein2014; @klein2018].\nOf course, a weak average effect size would not prevent divergent effects in all circumstances.\nAs we saw with the *Eucalyptus* analyses, taking a radically smaller subset of the data can lead to dramatically divergent effect sizes even 
when the mean with the full dataset is close to zero.\n\nOur observation that dramatic sub-setting in the *Eucalyptus* dataset was associated with correspondingly dramatic divergence in effect sizes leads us towards another hypothesis to explain the differences in heterogeneity between the *Eucalyptus* and blue tit analysis sets.\nIt may be that when analysts often divide a dataset into subsets, the result will be greater heterogeneity in analytical outcome for that dataset.\nAlthough we saw sub-setting associated with dramatic outliers in the *Eucalyptus* dataset, nearly all other analyses of *Eucalyptus* data used close to the same set of 351 samples, and as we saw, these effects did not vary substantially.\nHowever, analysts often analyzed only a subset of the blue tit data, and as we observed, sample sizes were much more variable among blue tit effects, and the effects themselves were also much more variable.\nImportant to note here is that subsets of data may differ from each other for biological reasons, but they may also differ due to sampling error.\nSampling error is a function of sample size, and sub-samples are, by definition, smaller samples, and so more subject to variability in effects due to sampling error [@jennions2013].\n\nOther features of datasets are also plausible candidates for driving heterogeneity in analytical outcomes, including features of covariates.\nIn particular, relationships between covariates and the response variable as well as relationships between covariates and the primary independent variable (collinearity) can strongly influence the modeled relationship between the independent variable of interest and the dependent variable [@morrissey2018; @dormann2013].\nTherefore, inclusion or exclusion of these covariates can drive heterogeneity in effect sizes ($Z_r$).\nAlso, as we saw with the two most extreme $Z_r$ values from the blue tit analyses, in multivariate models with collinear predictors, extreme effects can emerge when 
estimating partial correlation coefficients due to high collinearity, and conclusions can differ dramatically depending on which relationship receives the researcher's attention.\nTherefore, differences between datasets in the presence of strong and/or collinear covariates could influence the differences in heterogeneity in results among those datasets.\n\nAlthough it is too early in the many-analyst research program to conclude which analytical decisions or which features of datasets are the most important drivers of heterogeneity in analytical outcomes, we must still grapple with the possibility that analytical outcomes may vary substantially based on the choices we make as analysts.\nIf we assume that, at least sometimes, different analysts will produce dramatically different statistical outcomes, what should we do as ecologists and evolutionary biologists?\nWe review some ideas below.\n\nThe easiest path forward after learning about this analytical heterogeneity would be simply to continue with \"business as usual\", where researchers report results from a small number of statistical models.\nA case could be made for this path based on our results.\nFor instance, among the blue tit analyses, the precise values of the estimated $Z_r$ effects varied substantially, but the average effect was convincingly different from zero, and a majority of individual effects (84%) were in the same direction.\nArguably, many ecologists and evolutionary biologists appear primarily interested in the direction of a given effect and the corresponding p-value [@fidler2006], and so the variability we observed when analyzing the blue tit dataset may not worry these researchers.\nSimilarly, most effects from the *Eucalyptus* analyses were relatively close to zero, and about two-thirds of these effects did not cross the traditional threshold of statistical significance.\nTherefore, a large proportion of people analyzing these data would conclude that there was no effect, and this is 
consistent with what we might conclude from the meta-analysis.\n\nHowever, we find the counter arguments to \"business as usual\" to be compelling.\nFor blue tits, there was a substantial minority of calculated effects that would be interpreted by many biologists as indicating the absence of an effect (28%), and there were three traditionally 'significant' effects in the opposite direction to the average.\nThe qualitative conclusions of analysts also reflected substantial variability, with fully half of teams drawing a conclusion distinct from the one we draw from the distribution as a whole.\nThese teams with different conclusions were either uncertain about the negative relationship between competition and nestling growth, or they concluded that effects were mixed or absent.\nFor the *Eucalyptus* analyses, this issue is more concerning.\nAround two-thirds of effects had confidence intervals overlapping zero, and of the third of analyses with confidence intervals excluding zero, almost half were positive, and the rest were negative.\nAccordingly, the qualitative conclusions of the *Eucalyptus* teams were spread across the full range of possibilities.\nBut even this striking lack of consensus may be much less of a problem than what could emerge as scientists select which results to publish.\n\nA potentially larger argument against \"business as usual\" is that it provides the raw material for biasing the literature.\nWhen different model specifications readily lead to different results, analysts may be tempted to report the result that appears most interesting, or that is most consistent with expectation [@gelman2013; @forstmeier2017].\nThere is growing evidence that researchers in ecology and evolutionary biology often report a biased subset of the results they produce [@deressa2023; @kimmel2023], and that this bias exaggerates the average size of effects in the published literature between 30 and 150% [@yang2023; @parker2023].\nThe bias then accumulates in 
meta-analyses, apparently more than doubling the rate of conclusions of \"statistical significance\" in published meta-analyses above what would have been found in the absence of bias [@yang2023].\nThus, \"business as usual\" does not just create noisy results, it helps create systematically misleading results.\n\nIf we move away from “business as usual”, where do we go? \nMany obvious options involve multiple analyses per dataset. \nFor instance, there is the traditional robustness or sensitivity check [e.g., @pei2020; @briga2021], in which the researcher presents several alternative versions of an analysis to demonstrate that the result is ‘robust’ [@lu2014]. \nUnfortunately, robustness checks are at risk of the same potential biases of reporting found in other studies [@silberzahn2018], especially given the relatively few models typically presented. \nHowever, these risks could be minimized by running more models and doing so with a pre-registration or registered report. \nAnother option is model averaging. \nAverages across models often perform well [e.g., @taylor2023], and in some forms this may be a relatively simple solution. \nModel averaging, as most often practiced in ecology and evolutionary biology, involves first identifying a small suite of candidate models [see @burnham2002], then using Akaike weights, based on Akaike’s Information Criterion (AIC), to calculate weighted averages for parameter estimates from those models. \nAs with typical robustness checks, the small number of models limits the exploration of specification space, but examining a larger number of models could become the norm. \nHowever, there are more concerning limitations. \nThe largest of these limitations is that averaging regression coefficients is problematic when models differ in interaction terms or collinear variables [@cade2015]. \nAdditionally, weighting by AIC may often be inconsistent with our modelling goals. 
AIC balances the trade-off between model complexity and predictive ability, but penalizing models for complexity may not be suited for testing hypotheses about causation. \nSo, AIC may often not offer the weight we want to use, and we may also not wish to just generate an average at all. \nInstead, if we hope to understand an extensive universe of possible modelling outcomes, we could conduct a multiverse analysis, possibly with a specification curve [@simonsohn2015; @simonsohn2020]. \nThis could mean running hundreds or thousands of models (or more!) to examine the distribution of possible effects, and to see how different model specification choices map onto these effects. \nHowever, exploring large areas of specification space may come at the cost of including biologically implausible specifications. \nThus, we expect a trade-off, and attempts to limit models to the most biologically plausible may become increasingly difficult in proportion to the number of variables and modeling choices. \nTo make selecting plausible models easier, one could recruit multiple analysts to design one or a few plausible specifications each as with our ‘many analyst’ study [@silberzahn2018]. \nAn alternative that may be more labor intensive for the primary analyst, but which may lead to a more plausible set of models, could involve hypothesizing about causal pathways with DAGs [directed acyclic graphs; @arif2023] to constrain the model set. \nAs with other options outlined above, generating model specifications with DAGs could be partnered with pre-registration to hinder bias from undisclosed data dredging. \n\nResponses to heterogeneity in analysis outcomes need not be limited to simply conducting more analyses, especially if it turns out that analysis quality drives some of the observed heterogeneity. 
\nAs we noted above, we cannot yet rule out the possibility that insufficient statistical expertise or poor-quality analyses might drive some portion of the heterogeneity we observed. \nImproving the quality of analyses might be accomplished with a deliberate increase in investment in statistical education. \nMany ecology and evolutionary biology students learn their statistical practice informally, with many ecology doctoral programs in the USA not requiring a statistics course [@touchon2016], and no formal courses of any kind included in doctoral degrees in most other countries. \nIn cases where formal investment in statistical education is lacking, informal resources, such as guidelines and checklists, may help researchers avoid common mistakes. \nHowever, unless following guidelines or checklists is enforced for publication, the adherence to guidelines is patchy. \nFor example, despite the publication of guidelines for conducting meta-analyses in ecology, the quality of meta-analyses did not improve substantially over time [@koricheva2014]. \nEven in medical research where adherence to guidelines such as the PRISMA standards for systematic reviews and meta-analyses is more highly valued, adherence is often poor [@page2017].\n\nAlthough we have reviewed a variety of potential responses to the existence of variability in analytical outcomes, we certainly do not wish to imply that this is a comprehensive set of possible responses. \nNor do we wish to imply that the opinions we have expressed about these options are correct. \nDetermining how the disciplines of ecology and evolutionary biology should respond to knowledge of the variability in analytical outcome will benefit from the contribution and discussion of ideas from across these disciplines. 
\nWe look forward to learning from these discussions and to seeing how these disciplines ultimately respond.\n\n# Conclusions\n\nOverall, our results suggest to us that, where there is a diverse set of plausible analysis options, no single analysis should be considered a complete or reliable answer to a research question.\nFurther, because of the evidence that ecologists and evolutionary biologists often present a biased subset of the analyses they conduct [@deressa2023; @yang2023; @kimmel2023], we do not expect that even a collection of different effect sizes from different studies will accurately represent the true distribution of effects [@yang2023].\nTherefore, we believe that an increased level of skepticism of the outcomes of single analyses, or even single meta-analyses, is warranted going forward.\nWe recognize that some researchers have long maintained a healthy level of skepticism of individual studies as part of sound and practical scientific practice, and it is possible that those researchers will be neither surprised nor concerned by our results.\nHowever, we doubt that many researchers are sufficiently aware of the potential problems of analytical flexibility to be appropriately skeptical.\nWe hope that our work leads to conversations in ecology, evolutionary biology, and other disciplines about how best to contend with heterogeneity in results that is attributable to analytical decisions.\n\n# Declarations\n\n## Ethics, consent and permissions\n\nWe obtained permission to conduct this research from the Whitman College Institutional Review Board (IRB).\nAs part of this permission, the IRB approved the consent form () that all participants completed prior to joining the study.\nThe authors declare that they have no competing interests.\n\n## Availability of data and materials\n\nAll materials and data are archived and hosted on the OSF at [https://osf.io/mn5aj/](https://osf.io/mn5aj/), including survey instruments and analyst / reviewer consent forms. 
The Evolutionary Ecology Data and Ecology and Conservation Data provided to analysts are available at [https://osf.io/34fzc/](https://osf.io/34fzc/) and [https://osf.io/t76uy/](https://osf.io/t76uy/) respectively. Data has been anonymised, and the non-anonymised data is stored on the project OSF within private components accessible to the lead authors. \n\nWe built an R package, `ManyEcoEvo` to conduct the analyses described in this study [@ManyEcoEvo], which can be downloaded from [https://github.com/egouldo/ManyEcoEvo/](https://github.com/egouldo/ManyEcoEvo) to reproduce our analyses or replicate the analyses described here using alternate datasets. Data cleaning and preparation of analysis-data, as well as the analysis, is conducted in R [@base] reproducibly using the `targets` package [@targets]. This data and analysis pipeline is stored in the `ManyEcoEvo` package repository and its outputs are made available to users of the package when the library is loaded.\n\nThe full manuscript, including further analysis and presentation of results is written in Quarto [@AllaireQuarto2024]. The source code to reproduce the manuscript is hosted at [https://github.com/egouldo/ManyAnalysts/](https://github.com/egouldo/ManyAnalysts/), and the rendered version of the source code may be viewed at [https://egouldo.github.io/ManyAnalysts/](https://egouldo.github.io/ManyAnalysts/). 
All R packages and their versions used in the production of this manuscript are listed in the session info at @sec-sesion-info.\n\n## Competing interests\n\nThe authors declare that they have no competing interests\n\n## Funding\n\nEG's contributions were supported by an Australian Government Research Training Program Scholarship, AIMOS top-up scholarship (2022) and Melbourne Centre of Data Science Doctoral Academy Fellowship (2021).\nFF's contributions were supported by ARC Future Fellowship FT150100297.\n\n## Author's contributions\n\nHF, THP and FF conceptualized the project.\nPV provided raw data for *Eucalyptus* analyses and SG and THP provided raw data for blue tit analyses.\nDGH, HF and THP prepared surveys for collecting participating analysts and reviewer's data.\nEG, HF, THP, PV, SN and FF planned the analyses of the data provided by our analysts and reviewers, EG, HF, and THP curated the data, EG and HF wrote the software code to implement the analyses and prepare data visualisations.\nEG ensured that analyses were documented and reproducible.\nTHP and HF administered the project, including coordinating with analysts and reviewers.\nFF provided funding for the project.\nTHP, HF, and EG wrote the manuscript.\nAuthors listed alphabetically contributed analyses of the primary datasets or reviews of analyses.\nAll authors read and approved the final manuscript.\n\n## References {.unnumbered}\n\n::: {#refs}\n:::\n\n## Session Info {#sec-sesion-info}\n\n\n\n\n::: {#tbl-grateful-pkg-list .cell tbl-cap='R packages used to generate this manuscript. 
Please see the ManyEcoEvo package for a full list of packages used in the analysis pipeline.'}\n::: {.cell-output-display}\n\n\n|Package |Version |Citation |\n|:----------------|:--------|:----------------------|\n|base |4.4.0 |@base |\n|betapart |1.6 |@betapart |\n|broom.mixed |0.2.9.5 |@broommixed |\n|colorspace |2.1.0 |@colorspace2020a |\n|cowplot |1.1.3 |@cowplot |\n|devtools |2.4.5 |@devtools |\n|EnvStats |2.8.1 |@EnvStats-book |\n|GGally |2.2.1 |@GGally |\n|ggforestplot |0.1.0 |@ggforestplot |\n|ggh4x |0.2.8 |@ggh4x |\n|ggpubr |0.6.0 |@ggpubr |\n|ggrepel |0.9.5 |@ggrepel |\n|ggthemes |5.1.0 |@ggthemes |\n|glmmTMB |1.1.8 |@glmmTMB |\n|gt |0.10.1 |@gt |\n|gtsummary |1.7.2 |@gtsummary |\n|here |1.0.1 |@here |\n|Hmisc |5.1.2 |@Hmisc |\n|irr |0.84.1 |@irr |\n|janitor |2.2.0 |@janitor |\n|knitr |1.46 |@knitr2024 |\n|latex2exp |0.9.6 |@latex2exp |\n|lme4 |1.1.35.3 |@lme4 |\n|ManyEcoEvo |1.1.0 |@ManyEcoEvo |\n|metafor |4.6.0 |@metafor |\n|modelbased |0.8.7 |@modelbased |\n|multilevelmod |1.0.0 |@multilevelmod |\n|MuMIn |1.47.5 |@MuMIn |\n|naniar |1.1.0 |@naniar |\n|NatParksPalettes |0.2.0 |@NatParksPalettes |\n|orchaRd |2.0 |@orchaRd |\n|parameters |0.21.7 |@parameters |\n|patchwork |1.2.0 |@patchwork |\n|performance |0.11.0 |@performance |\n|renv |1.0.2 |@renv |\n|rmarkdown |2.27 |@rmarkdown2024 |\n|sae |1.3 |@molina-marhuenda:2015 |\n|scales |1.3.0 |@scales |\n|see |0.8.4 |@see |\n|showtext |0.9.7 |@showtext |\n|specr |1.0.0 |@specr |\n|targets |1.7.0 |@targets |\n|tidymodels |1.1.1 |@tidymodels |\n|tidytext |0.4.2 |@tidytext |\n|tidyverse |2.0.0 |@tidyverse |\n|withr |3.0.0 |@withr |\n|xfun |0.44 |@xfun |\n\n\n:::\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\ndevtools::session_info()\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n─ Session info ───────────────────────────────────────────────────────────────\n setting value\n version R version 4.4.0 (2024-04-24)\n os macOS Ventura 13.6.9\n system aarch64, darwin20\n ui X11\n language (EN)\n collate 
en_US.UTF-8\n ctype en_US.UTF-8\n tz Australia/Melbourne\n date 2024-09-17\n pandoc 3.1.12.2 @ /opt/homebrew/bin/ (via rmarkdown)\n\n─ Packages ───────────────────────────────────────────────────────────────────\n ! package * version date (UTC) lib source\n abind 1.4-8 2024-09-12 [1] RSPM (R 4.4.0)\n P ape 5.8 2024-04-11 [?] CRAN (R 4.4.0)\n P backports 1.5.0 2024-05-23 [?] RSPM\n P base64enc 0.1-3 2015-07-28 [?] RSPM\n bayestestR 0.14.0.2 2024-07-28 [1] https://easystats.r-universe.dev (R 4.4.0)\n P beeswarm 0.4.0 2021-06-01 [?] CRAN (R 4.4.0)\n P betapart 1.6 2023-03-13 [?] CRAN (R 4.4.0)\n P blastula 0.3.5 2024-02-24 [?] RSPM\n P bookdown * 0.40 2024-07-02 [?] RSPM\n P boot 1.3-30 2024-02-26 [?] CRAN (R 4.4.0)\n P broom * 1.0.6 2024-05-17 [?] RSPM\n P broom.helpers 1.15.0 2024-04-05 [?] CRAN (R 4.4.0)\n P broom.mixed * 0.2.9.5 2024-04-01 [?] CRAN (R 4.4.0)\n P cachem 1.1.0 2024-05-16 [?] RSPM\n P car 3.1-2 2023-03-30 [?] CRAN (R 4.4.0)\n P carData 3.0-5 2022-01-06 [?] CRAN (R 4.4.0)\n P checkmate 2.3.1 2023-12-04 [?] CRAN (R 4.4.0)\n P class 7.3-22 2023-05-03 [?] CRAN (R 4.4.0)\n cli 3.6.3 2024-06-21 [1] RSPM (R 4.4.0)\n P cluster 2.1.6 2023-12-01 [?] CRAN (R 4.4.0)\n P coda 0.19-4.1 2024-01-31 [?] CRAN (R 4.4.0)\n P codetools 0.2-20 2024-03-31 [?] CRAN (R 4.4.0)\n colorspace 2.1-1 2024-07-26 [1] RSPM (R 4.4.0)\n P commonmark 1.9.1 2024-01-30 [?] RSPM\n P cowplot 1.1.3 2024-01-22 [?] CRAN (R 4.4.0)\n curl 5.2.2 2024-08-26 [1] RSPM (R 4.4.0)\n data.table 1.16.0 2024-08-27 [1] RSPM (R 4.4.0)\n P datawizard 0.12.2 2024-07-21 [?] RSPM\n P devtools 2.4.5 2022-10-11 [?] RSPM\n P dials * 1.2.1 2024-02-22 [?] CRAN (R 4.4.0)\n P DiceDesign 1.10 2023-12-07 [?] CRAN (R 4.4.0)\n digest 0.6.37 2024-08-19 [1] RSPM (R 4.4.0)\n P doSNOW 1.0.20 2022-02-04 [?] CRAN (R 4.4.0)\n P dplyr * 1.1.4 2023-11-17 [?] RSPM\n P ellipsis 0.3.2 2021-04-29 [?] RSPM\n P emmeans 1.10.2 2024-05-20 [?] CRAN (R 4.4.0)\n P EnvStats 2.8.1 2023-08-22 [?] 
CRAN (R 4.4.0)\n P estimability 1.5.1 2024-05-12 [?] CRAN (R 4.4.0)\n P evaluate 0.24.0 2024-06-10 [?] RSPM\n P fansi 1.0.6 2023-12-08 [?] RSPM\n P farver 2.1.2 2024-05-13 [?] RSPM\n P fastmap 1.2.0 2024-05-15 [?] RSPM\n P fastmatch 1.1-4 2023-08-18 [?] CRAN (R 4.4.0)\n P forcats * 1.0.0 2023-01-29 [?] RSPM\n P foreach 1.5.2 2022-02-02 [?] CRAN (R 4.4.0)\n P foreign 0.8-86 2023-11-28 [?] CRAN (R 4.4.0)\n P Formula 1.2-5 2023-02-24 [?] CRAN (R 4.4.0)\n P fs 1.6.4 2024-04-25 [?] RSPM\n P furrr 0.3.1 2022-08-15 [?] RSPM\n P future 1.33.2 2024-03-26 [?] RSPM\n P future.apply 1.11.2 2024-03-28 [?] CRAN (R 4.4.0)\n P generics 0.1.3 2022-07-05 [?] RSPM\n geometry 0.5.0 2024-08-31 [1] RSPM (R 4.4.0)\n P ggbeeswarm 0.7.2 2023-04-29 [?] CRAN (R 4.4.0)\n ggeffects 1.6.0 2024-05-18 [2] CRAN (R 4.4.0)\n P ggforestplot 0.1.0 2024-06-16 [?] Github (NightingaleHealth/ggforestplot@547617e)\n P ggplot2 * 3.5.1 2024-04-23 [?] RSPM\n P ggpubr 0.6.0 2023-02-10 [?] CRAN (R 4.4.0)\n P ggsignif 0.6.4 2022-10-13 [?] CRAN (R 4.4.0)\n P globals 0.16.3 2024-03-08 [?] RSPM\n P glue 1.7.0 2024-01-09 [?] RSPM\n P gower 1.0.1 2022-12-22 [?] CRAN (R 4.4.0)\n P GPfit 1.0-8 2019-02-08 [?] CRAN (R 4.4.0)\n P grateful * 0.2.4 2023-10-22 [?] CRAN (R 4.4.0)\n P gridExtra 2.3 2017-09-09 [?] CRAN (R 4.4.0)\n gt * 0.11.0 2024-07-09 [1] RSPM (R 4.4.0)\n P gtable 0.3.5 2024-04-22 [?] RSPM\n P gtsummary 1.7.2 2023-07-15 [?] CRAN (R 4.4.0)\n P hardhat 1.4.0 2024-06-02 [?] CRAN (R 4.4.0)\n P haven 2.5.4 2023-11-30 [?] RSPM\n P here * 1.0.1 2020-12-13 [?] RSPM\n P Hmisc * 5.1-3 2024-05-28 [?] CRAN (R 4.4.0)\n P hms 1.1.3 2023-03-21 [?] RSPM\n P htmlTable 2.4.2 2023-10-29 [?] CRAN (R 4.4.0)\n P htmltools 0.5.8.1 2024-04-04 [?] RSPM\n P htmlwidgets 1.6.4 2023-12-06 [?] RSPM\n P httpuv 1.6.15 2024-03-26 [?] RSPM\n P igraph 2.0.3 2024-03-13 [?] CRAN (R 4.4.0)\n P infer * 1.0.7 2024-03-25 [?] CRAN (R 4.4.0)\n insight 0.20.4 2024-09-01 [1] RSPM (R 4.4.0)\n P ipred 0.9-14 2023-03-09 [?] 
CRAN (R 4.4.0)\n P irr * 0.84.1 2019-01-26 [?] CRAN (R 4.4.0)\n P iterators 1.0.14 2022-02-05 [?] CRAN (R 4.4.0)\n P itertools 0.1-3 2014-03-12 [?] CRAN (R 4.4.0)\n P janitor 2.2.0 2023-02-02 [?] RSPM\n P jsonlite 1.8.8 2023-12-04 [?] RSPM\n P juicyjuice 0.1.0 2022-11-10 [?] RSPM\n knitr 1.48 2024-07-07 [1] RSPM (R 4.4.0)\n P labeling 0.4.3 2023-08-29 [?] RSPM\n P later 1.3.2 2023-12-06 [?] RSPM\n P lattice 0.22-6 2024-03-20 [?] CRAN (R 4.4.0)\n P lava 1.8.0 2024-03-05 [?] CRAN (R 4.4.0)\n P lazyeval 0.2.2 2019-03-15 [?] CRAN (R 4.4.0)\n P lhs 1.1.6 2022-12-17 [?] CRAN (R 4.4.0)\n P lifecycle 1.0.4 2023-11-07 [?] RSPM\n P listenv 0.9.1 2024-01-29 [?] RSPM\n lme4 * 1.1-35.5 2024-07-03 [1] RSPM (R 4.4.0)\n lpSolve * 5.6.21 2024-09-12 [1] RSPM (R 4.4.0)\n P lubridate * 1.9.3 2023-09-27 [?] RSPM\n P magic 1.6-1 2022-11-16 [?] CRAN (R 4.4.0)\n P magrittr 2.0.3 2022-03-30 [?] RSPM\n ManyEcoEvo * 2.7.6.9004 2024-09-16 [2] Github (egouldo/ManyEcoEvo@b485185)\n P markdown 1.13 2024-06-04 [?] RSPM\n P MASS 7.3-60.2 2024-04-24 [?] local\n P mathjaxr 1.6-0 2022-02-28 [?] CRAN (R 4.4.0)\n P Matrix * 1.7-0 2024-03-22 [?] CRAN (R 4.4.0)\n P memoise 2.0.1 2021-11-26 [?] RSPM\n P metadat * 1.2-0 2022-04-06 [?] CRAN (R 4.4.0)\n P metafor * 4.6-0 2024-03-28 [?] CRAN (R 4.4.0)\n metaviz 0.3.1 2020-04-09 [2] CRAN (R 4.4.0)\n P mgcv 1.9-1 2023-12-21 [?] CRAN (R 4.4.0)\n P mime 0.12 2021-09-28 [?] RSPM\n P miniUI 0.1.1.1 2018-05-18 [?] RSPM\n P minpack.lm 1.2-4 2023-09-11 [?] CRAN (R 4.4.0)\n minqa 1.2.8 2024-08-17 [1] RSPM (R 4.4.0)\n P modelbased * 0.8.8 2024-06-11 [?] RSPM\n P modeldata * 1.3.0 2024-01-21 [?] CRAN (R 4.4.0)\n multcomp 1.4-25 2023-06-20 [2] CRAN (R 4.4.0)\n P MuMIn * 1.47.5 2023-03-22 [?] CRAN (R 4.4.0)\n P munsell 0.5.1 2024-04-01 [?] RSPM\n P mvtnorm 1.2-5 2024-05-21 [?] CRAN (R 4.4.0)\n P NatParksPalettes 0.2.0 2022-10-09 [?] CRAN (R 4.4.0)\n P nlme 3.1-164 2023-11-27 [?] CRAN (R 4.4.0)\n nloptr 2.1.1 2024-06-25 [1] RSPM (R 4.4.0)\n P nnet 7.3-19 2023-05-03 [?] 
CRAN (R 4.4.0)\n P numDeriv * 2016.8-1.1 2019-06-06 [?] CRAN (R 4.4.0)\n P orchaRd 2.0 2024-06-08 [?] Github (daniel1noble/orchaRd@15423d3)\n P parallelly 1.37.1 2024-02-29 [?] RSPM\n parameters * 0.22.1.7 2024-07-25 [1] https://easystats.r-universe.dev (R 4.4.0)\n P parsnip * 1.2.1 2024-03-22 [?] CRAN (R 4.4.0)\n P patchwork * 1.2.0 2024-01-08 [?] CRAN (R 4.4.0)\n P pbkrtest 0.5.2 2023-01-19 [?] CRAN (R 4.4.0)\n performance 0.12.2 2024-07-21 [1] https://easystats.r-universe.dev (R 4.4.0)\n P permute 0.9-7 2022-01-27 [?] CRAN (R 4.4.0)\n P picante 1.8.2 2020-06-10 [?] CRAN (R 4.4.0)\n P pillar 1.9.0 2023-03-22 [?] RSPM\n P pkgbuild 1.4.4 2024-03-17 [?] RSPM\n P pkgconfig 2.0.3 2019-09-22 [?] RSPM\n pkgload 1.4.0 2024-06-28 [1] RSPM (R 4.4.0)\n P pointblank 0.12.1 2024-03-25 [?] RSPM\n P prodlim 2023.08.28 2023-08-28 [?] CRAN (R 4.4.0)\n P profvis 0.3.8 2023-05-02 [?] RSPM\n P promises 1.3.0 2024-04-05 [?] RSPM\n P purrr * 1.0.2 2023-08-10 [?] RSPM\n P R6 2.5.1 2021-08-19 [?] RSPM\n P rcdd 1.6 2023-12-15 [?] CRAN (R 4.4.0)\n Rcpp 1.0.13 2024-07-17 [1] RSPM (R 4.4.0)\n P readr * 2.1.5 2024-01-10 [?] RSPM\n P recipes * 1.0.10 2024-02-18 [?] CRAN (R 4.4.0)\n P remotes 2.5.0 2024-03-17 [?] RSPM\n renv 1.0.7 2024-04-11 [1] CRAN (R 4.4.0)\n P rlang 1.1.4 2024-06-04 [?] RSPM\n rmarkdown * 2.28 2024-08-17 [1] RSPM (R 4.4.0)\n P rpart 4.1.23 2023-12-05 [?] CRAN (R 4.4.0)\n P rprojroot 2.0.4 2023-11-05 [?] RSPM\n P rsample * 1.2.1 2024-03-25 [?] CRAN (R 4.4.0)\n P rstatix 0.7.2 2023-02-01 [?] CRAN (R 4.4.0)\n P rstudioapi 0.16.0 2024-03-24 [?] RSPM\n P sae 1.3 2020-03-01 [?] CRAN (R 4.4.0)\n sandwich 3.1-0 2023-12-11 [2] CRAN (R 4.4.0)\n P sass 0.4.9 2024-03-15 [?] RSPM\n P scales * 1.3.0 2023-11-28 [?] RSPM\n see 0.8.5 2024-07-17 [1] https://easystats.r-universe.dev (R 4.4.0)\n P sessioninfo 1.2.2 2021-12-06 [?] RSPM\n P shiny 1.8.1.1 2024-04-02 [?] RSPM\n sjlabelled 1.2.0 2022-04-10 [2] CRAN (R 4.4.0)\n P snakecase 0.11.1 2023-08-27 [?] RSPM\n P snow 0.4-4 2021-10-27 [?] 
CRAN (R 4.4.0)\n P specr * 1.0.0 2023-01-20 [?] CRAN (R 4.4.0)\n P stringi 1.8.4 2024-05-06 [?] RSPM\n P stringr * 1.5.1 2023-11-14 [?] RSPM\n P survival 3.5-8 2024-02-14 [?] CRAN (R 4.4.0)\n TH.data 1.1-2 2023-04-17 [2] CRAN (R 4.4.0)\n P tibble * 3.2.1 2023-03-20 [?] RSPM\n P tidymodels * 1.2.0 2024-03-25 [?] CRAN (R 4.4.0)\n P tidyr * 1.3.1 2024-01-24 [?] RSPM\n P tidyselect 1.2.1 2024-03-11 [?] RSPM\n P tidyverse * 2.0.0 2023-02-22 [?] RSPM\n P timechange 0.3.0 2024-01-18 [?] RSPM\n P timeDate 4032.109 2023-12-14 [?] CRAN (R 4.4.0)\n timetk 2.9.0 2023-10-31 [2] CRAN (R 4.4.0)\n P tune * 1.2.1 2024-04-18 [?] CRAN (R 4.4.0)\n P tzdb 0.4.0 2023-05-12 [?] RSPM\n P urlchecker 1.0.1 2021-11-30 [?] RSPM\n P usethis 2.2.3 2024-02-19 [?] RSPM\n P utf8 1.2.4 2023-10-22 [?] RSPM\n V8 5.0.0 2024-08-16 [1] RSPM (R 4.4.0)\n P vctrs 0.6.5 2023-12-01 [?] RSPM\n vegan 2.6-8 2024-08-28 [1] RSPM (R 4.4.0)\n P vipor 0.4.7 2023-12-18 [?] CRAN (R 4.4.0)\n withr 3.0.1 2024-07-31 [1] RSPM (R 4.4.0)\n P workflows * 1.1.4 2024-02-19 [?] CRAN (R 4.4.0)\n P workflowsets * 1.1.0 2024-03-21 [?] CRAN (R 4.4.0)\n xfun 0.47 2024-08-17 [1] RSPM (R 4.4.0)\n P xml2 1.3.6 2023-12-04 [?] RSPM\n P xtable 1.8-4 2019-04-21 [?] RSPM\n xts 0.14.0 2024-06-05 [2] CRAN (R 4.4.0)\n yaml 2.3.10 2024-07-26 [1] RSPM (R 4.4.0)\n P yardstick * 1.3.1 2024-03-21 [?] 
CRAN (R 4.4.0)\n zoo 1.8-12 2023-04-13 [2] CRAN (R 4.4.0)\n\n [1] /Users/elliotgould/Documents/GitHub/ManyAnalysts/renv/library/macos/R-4.4/aarch64-apple-darwin20\n [2] /Library/Frameworks/R.framework/Versions/4.4-arm64/Resources/library\n\n P ── Loaded and on-disk path mismatch.\n\n──────────────────────────────────────────────────────────────────────────────\n```\n\n\n:::\n:::\n", "supporting": [ "index_files/figure-html" ], diff --git a/_freeze/index/figure-html/fig-cat-peer-rating-1.png b/_freeze/index/figure-html/fig-cat-peer-rating-1.png index 0d3a150..dddbeb3 100644 Binary files a/_freeze/index/figure-html/fig-cat-peer-rating-1.png and b/_freeze/index/figure-html/fig-cat-peer-rating-1.png differ diff --git a/_freeze/index/figure-html/fig-cat-peer-rating-2.png b/_freeze/index/figure-html/fig-cat-peer-rating-2.png deleted file mode 100644 index 932f78a..0000000 Binary files a/_freeze/index/figure-html/fig-cat-peer-rating-2.png and /dev/null differ diff --git a/_freeze/index/figure-html/fig-euc-yi-forest-plot-1.png b/_freeze/index/figure-html/fig-euc-yi-forest-plot-1.png index ae00238..c48d0f9 100644 Binary files a/_freeze/index/figure-html/fig-euc-yi-forest-plot-1.png and b/_freeze/index/figure-html/fig-euc-yi-forest-plot-1.png differ diff --git a/_freeze/index/figure-html/fig-forest-plot-bt-yi-1.png b/_freeze/index/figure-html/fig-forest-plot-bt-yi-1.png index 5e26606..d8ea3a9 100644 Binary files a/_freeze/index/figure-html/fig-forest-plot-bt-yi-1.png and b/_freeze/index/figure-html/fig-forest-plot-bt-yi-1.png differ diff --git a/_freeze/index/figure-html/fig-forest-plots-Zr-1.png b/_freeze/index/figure-html/fig-forest-plots-Zr-1.png index e79f0fa..9f8aa0c 100644 Binary files a/_freeze/index/figure-html/fig-forest-plots-Zr-1.png and b/_freeze/index/figure-html/fig-forest-plots-Zr-1.png differ diff --git a/_freeze/index/figure-html/fig-mixed-effect-marginal-means-plot-1.png b/_freeze/index/figure-html/fig-mixed-effect-marginal-means-plot-1.png index 
e7e301e..1d7f13d 100644 Binary files a/_freeze/index/figure-html/fig-mixed-effect-marginal-means-plot-1.png and b/_freeze/index/figure-html/fig-mixed-effect-marginal-means-plot-1.png differ diff --git a/_freeze/index/figure-html/fig-sorensen-plots-1.png b/_freeze/index/figure-html/fig-sorensen-plots-1.png index 146a15b..8629b5d 100644 Binary files a/_freeze/index/figure-html/fig-sorensen-plots-1.png and b/_freeze/index/figure-html/fig-sorensen-plots-1.png differ diff --git a/_freeze/index/figure-html/fig-sorensen-plots-2.png b/_freeze/index/figure-html/fig-sorensen-plots-2.png index 67a0fe5..2f42f26 100644 Binary files a/_freeze/index/figure-html/fig-sorensen-plots-2.png and b/_freeze/index/figure-html/fig-sorensen-plots-2.png differ diff --git a/_freeze/supp_mat/SM1_summary/execute-results/html.json b/_freeze/supp_mat/SM1_summary/execute-results/html.json index fcab385..ddc458a 100644 --- a/_freeze/supp_mat/SM1_summary/execute-results/html.json +++ b/_freeze/supp_mat/SM1_summary/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "70fb892fd4d3326d9f42c21171b600f5", + "hash": "5c46474b856a738deb0d3e2842887612", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Summarising Variation Among Analysis Specifications\"\nformat: html\neditor: visual\ncode-fold: true\nauthor:\n - name: Elliot Gould\n - name: Hannah S Fraser\nexecute:\n freeze: auto # re-render only when source changes\nbibliography: ../ms/references.bib\nnumber-sections: true\ntbl-cap-location: top\neditor_options: \n chunk_output_type: console\n---\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\nlibrary(targets)\nlibrary(withr)\nlibrary(here)\nlibrary(metafor)\nlibrary(ManyEcoEvo)\nlibrary(tidyverse)\nlibrary(broom)\nlibrary(gt)\nlibrary(specr)\nlibrary(colorspace)\nlibrary(ggthemes)\nlibrary(ggh4x)\nlibrary(showtext)\n\nset.seed(1234)\n# extrafont::font_install(\"Lato\")\n```\n:::\n\n\n\n\n## Summary Statistics\n\n### Number of analyses of different types\n\nAs described in the 
summary statistics section of the manuscript, 63 teams submitted 131 $Z_r$ model estimates and 43 teams submitted 64 $y_i$ predictions for the blue tit dataset. Similarly, 40 submitted 79 $Z_r$ model estimates and 14 teams submitted 24 $y_i$ predictions for the *Eucalytpus* dataset. The majority of the blue tit analyses specified normal error distributions and were non-Bayesian mixed effects models. Analyses of the *Eucalyptus* dataset rarely specified normal error distributions, likely because the response variable was in the form of counts. Mixed effects models were also common for *Eucalytpus* analyses (@tbl-Table1).\n\n\n::: {#tbl-Table1 .cell tbl-cap='Summary of the number of anaysis teams, total analyses, models with normal error distributions, mixed effects models, and models developed with Bayesian statistical methods for effect size analyses only ($Z_r$) and out-of-sample prediction only ($y_i$).'}\n\n```{.r .cell-code}\nTable1 %>% \n rename(subset = subset_name) %>% \n rename_with(~ str_remove(., \"sum_\")) %>% \n group_by(dataset) %>% \n gt::gt(rowname_col = \"subset\") %>% \n gt::cols_label(dataset = \"dataset\",\n subset = \"Subset\",\n totalanalyses = \"No. Analyses\",\n teams = \"No. 
Teams\",\n linear = \"Normal Distribution\",\n mixed = \"Mixed Effect\") %>% \n gt::sub_values(columns = subset, values = c(\"effects\"), \n replacement = gt::md(\"$$Z_r$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"predictions\"), \n replacement = gt::md(\"$$y_i$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"all\"), \n replacement = gt::md(\"All analyses\")) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::text_transform(fn = function(x) ifelse(x == \"eucalyptus\", \n gt::md(paste(\"*Eucalyptus*\")), x),\n locations = gt::cells_row_groups()) %>% \n gt::text_transform(fn = function(x) map(x, gt::md), \n locations = gt::cells_row_groups()) %>% \n gt::cols_move(teams,after = totalanalyses) %>% \n gt::as_raw_html()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n
No. AnalysesNo. TeamsNormal DistributionMixed EffectBayesian
blue tit
$$Z_r$$1316312412810
$$y_i$$6443596310
Eucalyptus
$$Z_r$$794015625
$$y_i$$24141163
\n
\n```\n\n:::\n:::\n\n\n### Model composition\n\nThe composition of models varied substantially (@tbl-Table2) in regards to the number of fixed and random effects, interaction terms and the number of data points used. For the blue tit dataset, models used up to 19 fixed effects, 12 random effects, and 10 interaction terms and had sample sizes ranging from 76 to 3720. For the *Eucalyptus* dataset models had up to 13 fixed effects, 4 random effects, 5 interaction terms and sample sizes ranging from 18 to 351.\n\n\n::: {#tbl-Table2 .cell tbl-cap='Mean, standard deviation and range of number of fixed and random variables and interaction terms used in models and sample size used. Repeated for effect size analyses only ($Z_r$) and out-of-sample prediction only ($y_i$).'}\n\n```{.r .cell-code}\nTable2 %>% \n rename(SD = sd, subset = subset_name) %>% \n group_by(variable) %>% \n pivot_wider(\n names_from = dataset,\n names_sep = \".\",\n values_from = c(mean, SD, min, max)\n ) %>% \n mutate(variable = case_when(variable == \"samplesize\" ~ \"*N*\",\n TRUE ~ variable)) %>% \n gt::gt(rowname_col = \"subset\") %>% \n gt::row_group_order(groups = c(\"fixed\", \"random\", \"interactions\", \"*N*\")) %>% \n gt::tab_spanner_delim(delim = \".\") %>% \n gt::fmt_scientific(columns = c(contains(\"mean\"), contains(\"SD\")),\n decimals = 2) %>% \n gt::cols_label_with(fn = Hmisc::capitalize) %>% \n gt::tab_style(\n style = gt::cell_text(transform = \"capitalize\"),\n locations = gt::cells_column_spanners()\n ) %>% \n gt::cols_label_with(c(contains(\"Eucalyptus\")), \n fn = ~ gt::md(paste0(\"*\",.x, \"*\"))) %>% \n gt::sub_values(columns = subset, values = c(\"effects\"), \n replacement = gt::md(\"$$Z_r$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"predictions\"), \n replacement = gt::md(\"$$y_i$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"all\"), \n replacement = gt::md(\"All analyses\")) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n 
gt::as_raw_html()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n
\n mean\n \n SD\n \n min\n \n max\n
Blue titEucalyptusBlue titEucalyptusBlue titEucalyptusBlue titEucalyptus
fixed
$$Z_r$$5.205.012.923.83111913
$$y_i$$4.784.672.353.45111013
random
$$Z_r$$3.531.412.081.0900104
$$y_i$$4.429.60 × 10−12.788.10 × 10−110123
interactions
$$Z_r$$4.40 × 10−11.60 × 10−11.116.50 × 10−100105
$$y_i$$2.80 × 10−11.70 × 10−16.30 × 10−14.80 × 10−10032
*N*
$$Z_r$$2.61 × 1032.98 × 1029.37 × 1021.06 × 10276183720351
$$y_i$$2.82 × 1033.26 × 1027.73 × 1026.42 × 101396903720350
\n
\n```\n\n:::\n:::\n\n\n### Choice of variables\n\nThe choice of variables also differed substantially among analyses (@tbl-Table3) and some analysts constructed new variables that transformed or aggregated one or more existing variables. The blue tit dataset had a total of 52 candidate variables. These variables were included in a mean of 20.5 $Z_r$ analyses (range 0- 100). The *Eucalyptus* dataset had a total of 59 candidate variables. The variables in the *Eucalyptus* dataset were included in a mean of 8.92 $Z_r$ analyses (range 0-55).\n\n\n::: {#tbl-Table3 .cell tbl-cap='Mean, SD, minimum and maximum number of analyses in which each variable was used, for effect size analyses only ($Z_r$), out-of-sample prediction only ($y_i$), using the full dataset.'}\n\n```{.r .cell-code}\n#table 3 - summary of mean, sd and range for the number of analyses in which each variable was used\nTable3 %>% \n rename(SD = sd, subset = subset_name) %>% \n pivot_wider(\n names_from = dataset,\n names_sep = \".\",\n values_from = c(mean, SD, min, max)\n ) %>% \n ungroup %>% \n gt::gt(rowname_col = \"subset\") %>% \n gt::tab_spanner_delim(delim = \".\") %>% \n gt::fmt_scientific(columns = c(contains(\"mean\"), contains(\"SD\")),\n decimals = 2) %>% \n gt::cols_label_with(fn = Hmisc::capitalize) %>% \n gt::cols_label_with(c(contains(\"Eucalyptus\")), fn = ~ gt::md(paste0(\"*\",.x, \"*\"))) %>% \n gt::sub_values(columns = subset, values = c(\"effects\"), \n replacement = gt::md(\"$$Z_r$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"predictions\"), \n replacement = gt::md(\"$$y_i$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"all\"), \n replacement = gt::md(\"All analyses\")) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::tab_style(\n style = gt::cell_text(transform = \"capitalize\"),\n locations = gt::cells_column_spanners()\n )\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n
\n mean\n \n SD\n \n min\n \n max\n
Blue titEucalyptusBlue titEucalyptusBlue titEucalyptusBlue titEucalyptus
$$Z_r$$2.05 × 1018.922.70 × 1011.23 × 1010010055
$$y_i$$1.08 × 1012.201.39 × 1013.70005217
\n
\n```\n\n:::\n:::\n\n\n## Effect Size Specification Analysis\n\nWe used a specification curve [@simonsohn2015] to look for relationships between $Z_r$ values and several modeling decisions, including the choice of independent and dependent variable, transformation of the dependent variable, and other features of the models that produced those $Z_r$ values (@fig-specr-bt, @fig-specr-euc). Each effect can be matched to the model features that produced it by following a vertical line down the figure.\n\n### Blue tit\n\nWe observed few clear trends in the blue tit specification curve (@fig-specr-bt). The clearest trend was for the independent variable 'contrast: reduced broods vs. unmanipulated broods' to produce weak or even positive relationships, but never strongly negative relationships. The biological interpretation of this pattern is that nestlings in reduced broods averaged similar growth to nestlings in unmanipulated broods, and sometimes the nestlings in reduced broods even grew less than the nestlings in unmanipulated broods. Therefore, it may be that competition limits nestling growth primarily when the number of nestlings exceeds the clutch size produced by the parents, and not in unmanipulated broods. The other relatively clear trend was that the strongest negative relationships were never based on the independent variable 'contrast: unmanipulated broods vs. enlarged broods'. 
These observations demonstrate the potential value of specification curves.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# knitr::read_chunk(here::here(\"index.qmd\"), labels = \"calc_MA_mod_coefs\")\n#TODO why is here??\ncoefs_MA_mod <- bind_rows( ManyEcoEvo_viz %>%\n filter(model_name == \"MA_mod\",\n exclusion_set == \"complete\",\n expertise_subset == \"All\"),\n ManyEcoEvo_viz %>%\n filter(model_name == \"MA_mod\",\n exclusion_set == \"complete-rm_outliers\",\n expertise_subset == \"All\") #TODO may need to recalculate\n ) %>%\n hoist(tidy_mod_summary) %>%\n select(-starts_with(\"mod\"), -ends_with(\"plot\"), -estimate_type) %>%\n unnest(cols = c(tidy_mod_summary))\n```\n:::\n\n\n\n::: {.cell .column-page-right layout-align=\"center\"}\n\n```{.r .cell-code}\nanalytical_choices_bt <- ManyEcoEvo_results$effects_analysis[[1]] %>% \n select(study_id, \n response_transformation_description, # Don't need constructed, as this is accounted for in y\n response_variable_name, \n test_variable, \n Bayesian, \n linear_model,\n model_subclass, \n sample_size, \n starts_with(\"num\"),\n link_function_reported,\n mixed_model) %>% \n mutate(across(starts_with(\"num\"), as.numeric),\n response_transformation_description = case_when(is.na(response_transformation_description) ~ \"None\",\n TRUE ~ response_transformation_description)) %>% \n rename(y = response_variable_name, x = test_variable, model = linear_model) %>% \n select(study_id,x,y,model, model_subclass, response_transformation_description, link_function_reported, mixed_model, sample_size) %>% \n pivot_longer(-study_id, names_to = \"variable_type\", values_to = \"variable_name\",values_transform = as.character) %>% \n left_join(forest_plot_new_labels) %>% \n mutate(variable_name = case_when(is.na(user_friendly_name) ~ variable_name, TRUE ~ user_friendly_name)) %>% \n select(-user_friendly_name) %>% \n pivot_wider(names_from = variable_type, values_from = variable_name) %>% \n mutate(sample_size = 
as.numeric(sample_size))\n\n\nMA_mean_bt <- ManyEcoEvo_viz$model[[1]] %>% \n broom::tidy(conf.int = TRUE) %>% \n rename(study_id = term)\n\nresults_bt <- ManyEcoEvo_viz$model[[1]] %>% \n broom::tidy(conf.int = TRUE, include_studies = TRUE) %>% \n rename(study_id = term) %>% \n semi_join(analytical_choices_bt) %>% \n left_join(analytical_choices_bt)\n\nsamp_size_hist_bt <- specr::plot_samplesizes(results_bt %>% \n rename(fit_nobs = sample_size)) +\n cowplot::theme_half_open() +\n theme(axis.ticks.x = element_blank(), \n axis.text.x = element_blank())\n\ncurve_bt <- specr::plot_curve(results_bt) +\n geom_hline(yintercept = 0, \n linetype = \"dashed\", \n color = \"black\") +\n geom_pointrange(mapping = aes(x = 0, y = estimate, ymin = conf.low, ymax = conf.high ), \n data = MA_mean_bt,\n colour = \"black\", shape = \"diamond\") +\n labs(x = \"\", y = \"Standardized Effect Size Zr\") +\n cowplot::theme_half_open() +\n theme(axis.ticks.x = element_blank(), \n axis.text.x = element_blank())\n\nspecs_bt <- specr::plot_choices(results_bt %>% \n rename(\"Independent\\nVariable\" = x,\n \"Dependent\\nVariable\" = y,\n Model = model,\n \"Model Subclass\" = model_subclass,\n \"Mixed Model\" = mixed_model,\n \"Response\\nTransformation\\nDescription\" = response_transformation_description,\n \"Link Function\" = link_function_reported), \n choices = c(\"Independent\\nVariable\", \n \"Dependent\\nVariable\", \n \"Model\", \n \"Model Subclass\", \n \"Mixed Model\", \n \"Response\\nTransformation\\nDescription\", \n \"Link Function\")) +\n labs(x = \"specifications (ranked)\") +\n theme(strip.text.x = element_blank(),\n strip.text.y = element_text(size = 8, angle = 360, face = \"bold\"),\n axis.ticks.x = element_blank(), \n axis.text.x = element_blank()) \n\n\ncowplot::plot_grid(curve_bt, specs_bt, samp_size_hist_bt,\n ncol = 1,\n align = \"v\",\n rel_heights = c(1.5, 2.2, 0.8),\n axis = \"rbl\",labels = \"AUTO\") \n```\n\n::: {.cell-output-display}\n![**A.** Forest plot for blue 
tit analyses: standardized effect-sizes (circles) and their 95% confidence intervals are displayed for each analysis included in the meta-analysis model. The meta-analytic mean effect-size is denoted by a black diamond, with error bars also representing the 95% confidence interval. The dashed black line demarcates effect sizes of 0, whereby no effect of the test variable on the response variable is found. Blue points where Zr and its associated confidence intervals are greater than 0 indicate analyses that found a negative effect of sibling number on nestling growth. Gray coloured points have confidence intervals crossing 0, indicating no relationship between the test and response variable. Red points indicate the analysis found a positive relationship between sibling number and nestling growth. **B.** Analysis specification plot: for each analysis plotted in A, the corresponding combination of analysis decisions is plotted. Each decision and its alternative choices is grouped into its own facet, with the decision point described on the right of the panel, and the option shown on the left. Lines indicate the option chosen used in the corresponding point in plot A. **C.** Sample sizes of each analysis. Note that empty bars indicate analyst did not report sample size and sample size could not be derived by lead team.](SM1_summary_files/figure-html/fig-specr-bt-1.png){#fig-specr-bt fig-align='center' width=1152}\n:::\n:::\n\n\n### *Eucalyptus*\n\nIn the *Eucalyptus* specification curve, there are no strong trends (@fig-specr-euc). It is, perhaps, the case that choosing the dependent variable 'count of seedlings 0-0.5m high' corresponds to more positive results and choosing 'count of all *Eucalytpus* seedlings' might find more negative results. 
Choosing the independent variable 'sum of all grass types (with or without non-grass graminoids)' might be associated with more results close to zero consistent with the absence of an effect.\n\n\n::: {.cell .column-page-right layout-align=\"center\"}\n\n```{.r .cell-code}\nanalytical_choices_euc <- ManyEcoEvo_results$effects_analysis[[2]] %>% \n select(study_id, \n response_transformation_description, # Don't need constructed, as this is accounted for in y\n response_variable_name, \n test_variable, \n Bayesian, \n linear_model,\n model_subclass, \n sample_size, \n starts_with(\"num\"),\n transformation,\n mixed_model,\n link_function_reported) %>% \n mutate(across(starts_with(\"num\"), as.numeric),\n response_transformation_description = case_when(is.na(response_transformation_description) ~ \"None\",\n TRUE ~ response_transformation_description),\n response_variable_name = case_when(response_variable_name == \"average.proportion.of.plots.containing.at.least.one.euc.seedling.of.any.size\" ~ \"mean.prop.plots>=1seedling\",\n TRUE ~ response_variable_name)) %>% \n rename(y = response_variable_name, x = test_variable, model = linear_model) %>% \n select(study_id,x,y,model, model_subclass, response_transformation_description, link_function_reported, mixed_model, sample_size) %>% \n pivot_longer(-study_id, names_to = \"variable_type\", values_to = \"variable_name\",values_transform = as.character) %>% \n left_join(forest_plot_new_labels) %>% \n mutate(variable_name = case_when(is.na(user_friendly_name) ~ variable_name, TRUE ~ user_friendly_name)) %>% \n select(-user_friendly_name) %>% \n pivot_wider(names_from = variable_type, values_from = variable_name, values_fn = list) %>% \n unnest(cols = everything()) %>% #TODO remove unnest and values_fn = list when origin of duplicate entry for R_1LRqq2WHrQaENtM-1-1-1 is identified\n mutate(sample_size = as.numeric(sample_size))\n\n\n\nMA_mean_euc <- ManyEcoEvo_viz %>% \n filter(model_name == \"MA_mod\", publishable_subset == 
\"All\", dataset == \"eucalyptus\", exclusion_set == \"complete\") %>% \n pluck(\"model\", 1) %>% \n broom::tidy(conf.int = TRUE) %>% \n rename(study_id = term) \n\nresults_euc <- ManyEcoEvo_viz %>% \n filter(model_name == \"MA_mod\", publishable_subset == \"All\", dataset == \"eucalyptus\", exclusion_set == \"complete\") %>% \n pluck(\"model\", 1) %>% \n broom::tidy(conf.int = TRUE, include_studies = TRUE) %>% \n rename(study_id = term) %>% \n semi_join(analytical_choices_euc) %>% \n left_join(analytical_choices_euc)\n\nsamp_size_hist_euc <- specr::plot_samplesizes(results_euc %>% rename(fit_nobs = sample_size)) +\n cowplot::theme_half_open() +\n theme(axis.ticks.x = element_blank(), axis.text.x = element_blank())\n\ncurve_euc <- specr::plot_curve(results_euc) +\n geom_hline(yintercept = 0, \n linetype = \"dashed\", \n color = \"black\") +\n geom_pointrange(mapping = aes(x = 0, y = estimate, ymin = conf.low, ymax = conf.high ), \n data = MA_mean_euc,\n colour = \"black\", shape = \"diamond\") +\n labs(x = \"\", y = \"Standardized Effect Size Zr\") +\n cowplot::theme_half_open() +\n theme(axis.ticks.x = element_blank(), \n axis.text.x = element_blank())\n\nspecs_euc <- specr::plot_choices(results_euc %>% \n rename(\"Independent\\nVariable\" = x,\n \"Dependent\\nVariable\" = y,\n Model = model,\n \"Model Subclass\" = model_subclass,\n \"Mixed Model\" = mixed_model,\n \"Response\\nTransformation\\nDescription\" = response_transformation_description,\n \"Link Function\" = link_function_reported), \n choices = c(\"Independent\\nVariable\", \n \"Dependent\\nVariable\", \n \"Model\", \n \"Model Subclass\", \n \"Mixed Model\", \n \"Response\\nTransformation\\nDescription\", \n \"Link Function\")) +\n labs(x = \"specifications (ranked)\") +\n theme(strip.text.x = element_blank(),\n strip.text.y = element_text(size = 8, angle = 360, face = \"bold\"),\n axis.ticks.x = element_blank(), \n axis.text.x = element_blank()) \n\ncowplot::plot_grid(curve_euc, specs_euc, 
samp_size_hist_euc,\n ncol = 1,\n align = \"v\",\n rel_heights = c(1.5, 2.2, 0.8),\n axis = \"rbl\",labels = \"AUTO\") \n```\n\n::: {.cell-output-display}\n![**A.** Forest plot for *Eucalyptus* analyses: standardized effect-sizes (circles) and their 95% confidence intervals are displayed for each analysis included in the meta-analysis model. The meta-analytic mean effect-size is denoted by a black diamond, with error bars also representing the 95% confidence interval. The dashed black line demarcates effect sizes of 0, whereby no effect of the test variable on the response variable is found. Blue points where $Z_r$ and its associated confidence intervals are greater than 0 indicate analyses that found a positive relationship of grass cover on *Eucalyptus* seedling success. Gray coloured points have confidence intervals crossing 0, indicating no relationship between the test and response variable. Red points indicate the analysis found a negative relationship between grass cover and *Eucalyptus seedling success*. **B.** Analysis specification plot: for each analysis plotted in A, the corresponding combination of analysis decisions is plotted. Each decision and its alternative choices is grouped into its own facet, with the decision point described on the right of the panel, and the option shown on the left. Lines indicate the option chosen used in the corresponding point in plot A. **C.** Sample sizes of each analysis. 
Note that empty bars indicate analyst did not report sample size and sample size could not be derived by lead team.](SM1_summary_files/figure-html/fig-specr-euc-1.png){#fig-specr-euc fig-align='center' width=1152}\n:::\n:::\n", + "markdown": "---\ntitle: \"Summarising Variation Among Analysis Specifications\"\nformat: \n html:\n code-fold: true\n echo: true\neditor: visual\nexecute:\n freeze: auto # re-render only when source changes\nbibliography: ../ms/references.bib\nnumber-sections: true\ntbl-cap-location: top\neditor_options: \n chunk_output_type: console\npre-render: \"utils.R\"\n---\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\nlibrary(targets)\nlibrary(withr)\nlibrary(here)\nlibrary(metafor)\nlibrary(ManyEcoEvo)\nlibrary(tidyverse)\nlibrary(broom)\nlibrary(gt)\nlibrary(specr)\nlibrary(colorspace)\nlibrary(ggthemes)\nlibrary(ggh4x)\nlibrary(showtext)\n\nset.seed(1234)\nsource(here::here(\"utils.R\"))\n# extrafont::font_install(\"Lato\")\n```\n:::\n\n\n\n\n\n\n## Summary Statistics\n\n### Number of analyses of different types\n\nAs described in the summary statistics section of the manuscript, 63 teams submitted 131 $Z_r$ model estimates and 43 teams submitted 64 $y_i$ predictions for the blue tit dataset. Similarly, 40 submitted 79 $Z_r$ model estimates and 14 teams submitted 24 $y_i$ predictions for the *Eucalytpus* dataset. The majority of the blue tit analyses specified normal error distributions and were non-Bayesian mixed effects models. Analyses of the *Eucalyptus* dataset rarely specified normal error distributions, likely because the response variable was in the form of counts. 
Mixed effects models were also common for *Eucalytpus* analyses (@tbl-Table1).\n\n\n\n\n::: {#tbl-Table1 .cell tbl-cap='Summary of the number of analysis teams, total analyses, models with normal error distributions, mixed effects models, and models developed with Bayesian statistical methods for effect size analyses only ($Z_r$) and out-of-sample prediction only ($y_i$).'}\n\n```{.r .cell-code}\nTable1 %>% \n rename(subset = subset_name) %>% \n rename_with(~ str_remove(., \"sum_\")) %>% \n group_by(dataset) %>% \n gt::gt(rowname_col = \"subset\") %>% \n gt::cols_label(dataset = \"dataset\",\n subset = \"Subset\",\n totalanalyses = \"No. Analyses\",\n teams = \"No. Teams\",\n linear = \"Normal Distribution\",\n mixed = \"Mixed Effect\") %>% \n gt::sub_values(columns = subset, values = c(\"effects\"), \n replacement = gt::md(\"$$Z_r$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"predictions\"), \n replacement = gt::md(\"$$y_i$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"all\"), \n replacement = gt::md(\"All analyses\")) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::text_transform(fn = function(x) ifelse(x == \"eucalyptus\", \n gt::md(paste(\"*Eucalyptus*\")), x),\n locations = gt::cells_row_groups()) %>% \n gt::text_transform(fn = function(x) map(x, gt::md), \n locations = gt::cells_row_groups()) %>% \n gt::cols_move(teams,after = totalanalyses) %>% \n gt::as_raw_html()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n
No. AnalysesNo. TeamsNormal DistributionMixed EffectBayesian

blue tit

\n
$$Z_r$$1316312412810
$$y_i$$6443596310

Eucalyptus

\n
$$Z_r$$794015625
$$y_i$$24141163
\n
\n```\n\n:::\n:::\n\n\n\n\n### Model composition\n\nThe composition of models varied substantially (@tbl-Table2) in regards to the number of fixed and random effects, interaction terms and the number of data points used. For the blue tit dataset, models used up to 19 fixed effects, 12 random effects, and 10 interaction terms and had sample sizes ranging from 76 to 3720. For the *Eucalyptus* dataset models had up to 13 fixed effects, 4 random effects, 5 interaction terms and sample sizes ranging from 18 to 351.\n\n\n\n\n::: {#tbl-Table2 .cell tbl-cap='Mean, standard deviation and range of number of fixed and random variables, interaction terms used in models and analysis sample size (*N*). Repeated for effect-size analyses only ($Z_r$) and out-of-sample predictions only ($y_i$).'}\n\n```{.r .cell-code}\nTable2 %>% \n rename(SD = sd, subset = subset_name) %>% \n group_by(variable) %>% \n pivot_wider(\n names_from = dataset,\n names_sep = \".\",\n values_from = c(mean, SD, min, max)\n ) %>% \n mutate(variable = case_when(variable == \"samplesize\" ~ \"N\",\n TRUE ~ variable)) %>% \n gt::gt(rowname_col = \"subset\") %>% \n gt::row_group_order(groups = c(\"fixed\", \"random\", \"interactions\", \"N\")) %>% \n gt::tab_spanner_delim(delim = \".\") %>% \n gt::fmt_scientific(columns = \"mean.blue tit\",\n rows = `mean.blue tit` < 0.01,\n decimals = 2) %>% \n gt::fmt_scientific(columns = \"SD.blue tit\",\n rows = `SD.blue tit` < 0.01,\n decimals = 2) %>% \n gt::fmt_scientific(columns = \"mean.eucalyptus\",\n rows = `mean.eucalyptus` < 0.01,\n decimals = 2) %>% \n gt::fmt_scientific(columns = \"SD.eucalyptus\",\n rows = `SD.eucalyptus` < 0.01,\n decimals = 2) %>% \n gt::cols_label_with(fn = Hmisc::capitalize) %>% \n gt::tab_style(\n style = gt::cell_text(transform = \"capitalize\"),\n locations = gt::cells_column_spanners()\n ) %>% \n gt::tab_style(style = gt::cell_text(transform = \"capitalize\"),locations = cells_row_groups()) %>% \ngt::tab_style(style = gt::cell_text(style 
= \"italic\"), locations = cells_row_groups(groups = \"N\")) %>% \n gt::cols_label_with(c(contains(\"Eucalyptus\")), \n fn = ~ gt::md(paste0(\"*\",.x, \"*\"))) %>% \n gt::sub_values(columns = subset, values = c(\"effects\"), \n replacement = gt::md(\"$$Z_r$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"predictions\"), \n replacement = gt::md(\"$$y_i$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"all\"), \n replacement = gt::md(\"All analyses\")) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::as_raw_html()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n
\n mean\n \n SD\n \n min\n \n max\n
Blue tit

Eucalyptus

\n
Blue tit

Eucalyptus

\n
Blue tit

Eucalyptus

\n
Blue tit

Eucalyptus

\n
fixed
$$Z_r$$5.205.012.923.83111913
$$y_i$$4.784.672.353.45111013
random
$$Z_r$$3.531.412.081.0900104
$$y_i$$4.420.962.780.8110123
interactions
$$Z_r$$0.440.161.110.6500105
$$y_i$$0.280.170.630.480032
N
$$Z_r$$2611.09298.43937.48106.2576183720351
$$y_i$$2816.71325.55773.2164.17396903720350
\n
\n```\n\n:::\n:::\n\n\n\n\n### Choice of variables\n\nThe choice of variables also differed substantially among analyses (@tbl-Table3) and some analysts constructed new variables that transformed or aggregated one or more existing variables. The blue tit dataset had a total of 52 candidate variables. These variables were included in a mean of 20.5 $Z_r$ analyses (range 0- 100). The *Eucalyptus* dataset had a total of 59 candidate variables. The variables in the *Eucalyptus* dataset were included in a mean of 8.92 $Z_r$ analyses (range 0-55).\n\n\n\n\n::: {#tbl-Table3 .cell tbl-cap='Mean, $\\text{SE}$, minimum and maximum number of analyses in which each variable was used, for effect size analyses only ($Z_r$), out-of-sample prediction only ($y_i$), using the full dataset.'}\n\n```{.r .cell-code}\n#table 3 - summary of mean, sd and range for the number of analyses in which each variable was used\nTable3 %>% \n rename(SD = sd, subset = subset_name) %>% \n pivot_wider(\n names_from = dataset,\n names_sep = \".\",\n values_from = c(mean, SD, min, max)\n ) %>% \n ungroup %>% \n gt::gt(rowname_col = \"subset\") %>% \n gt::tab_spanner_delim(delim = \".\") %>% \n gt::fmt_scientific(columns = \"mean.blue tit\",\n rows = `mean.blue tit` < 0.01,\n decimals = 2) %>% \n gt::fmt_scientific(columns = \"SD.blue tit\",\n rows = `SD.blue tit` < 0.01,\n decimals = 2) %>% \n gt::fmt_scientific(columns = \"mean.eucalyptus\",\n rows = `mean.eucalyptus` < 0.01,\n decimals = 2) %>% \n gt::fmt_scientific(columns = \"SD.eucalyptus\",\n rows = `SD.eucalyptus` < 0.01,\n decimals = 2) %>% \n gt::fmt_number(decimals = 2,drop_trailing_zeros = T, drop_trailing_dec_mark = T) %>% \n gt::cols_label_with(fn = Hmisc::capitalize) %>% \n gt::cols_label_with(c(contains(\"Eucalyptus\")), fn = ~ gt::md(paste0(\"*\",.x, \"*\"))) %>% \n gt::sub_values(columns = subset, values = c(\"effects\"), \n replacement = gt::md(\"$$Z_r$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"predictions\"), \n 
replacement = gt::md(\"$$y_i$$\")) %>% \n gt::sub_values(columns = subset, values = c(\"all\"), \n replacement = gt::md(\"All analyses\")) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::tab_style(\n style = gt::cell_text(transform = \"capitalize\"),\n locations = gt::cells_column_spanners()\n ) %>% \n gt::as_raw_html()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n
\n mean\n \n SD\n \n min\n \n max\n
Blue tit

Eucalyptus

\n
Blue tit

Eucalyptus

\n
Blue tit

Eucalyptus

\n
Blue tit

Eucalyptus

\n
$$Z_r$$20.58.922712.280010055
$$y_i$$10.792.213.873.7005217
\n
\n```\n\n:::\n:::\n\n\n\n\n## Effect Size Specification Analysis\n\nWe used a specification curve [@simonsohn2015] to look for relationships between $Z_r$ values and several modeling decisions, including the choice of independent and dependent variable, transformation of the dependent variable, and other features of the models that produced those $Z_r$ values (@fig-specr-bt, @fig-specr-euc). Each effect can be matched to the model features that produced it by following a vertical line down the figure.\n\n### Blue tit\n\nWe observed few clear trends in the blue tit specification curve (@fig-specr-bt). The clearest trend was for the independent variable 'contrast: reduced broods vs. unmanipulated broods' to produce weak or even positive relationships, but never strongly negative relationships. The biological interpretation of this pattern is that nestlings in reduced broods averaged similar growth to nestlings in unmanipulated broods, and sometimes the nestlings in reduced broods even grew less than the nestlings in unmanipulated broods. Therefore, it may be that competition limits nestling growth primarily when the number of nestlings exceeds the clutch size produced by the parents, and not in unmanipulated broods. The other relatively clear trend was that the strongest negative relationships were never based on the independent variable 'contrast: unmanipulated broods vs. enlarged broods'. 
These observations demonstrate the potential value of specification curves.\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# knitr::read_chunk(here::here(\"index.qmd\"), labels = \"calc_MA_mod_coefs\")\n#TODO why is here??\n\ncoefs_MA_mod <- bind_rows( ManyEcoEvo_viz %>%\n filter(model_name == \"MA_mod\",\n exclusion_set == \"complete\",\n expertise_subset == \"All\"),\n ManyEcoEvo_viz %>%\n filter(model_name == \"MA_mod\",\n exclusion_set == \"complete-rm_outliers\",\n expertise_subset == \"All\") #TODO may need to recalculate\n ) %>%\n hoist(tidy_mod_summary) %>%\n select(-starts_with(\"mod\"), -ends_with(\"plot\"), -estimate_type) %>%\n unnest(cols = c(tidy_mod_summary))\n```\n:::\n\n\n\n::: {.cell .column-page-right layout-align=\"center\"}\n\n```{.r .cell-code}\nanalytical_choices_bt <- ManyEcoEvo_results$effects_analysis[[1]] %>% \n select(study_id, \n response_transformation_description, # Don't need constructed, as this is accounted for in y\n response_variable_name, \n test_variable, \n Bayesian, \n linear_model,\n model_subclass, \n sample_size, \n starts_with(\"num\"),\n link_function_reported,\n mixed_model) %>% \n mutate(across(starts_with(\"num\"), as.numeric),\n response_transformation_description = case_when(is.na(response_transformation_description) ~ \"None\",\n TRUE ~ response_transformation_description)) %>% \n rename(y = response_variable_name, x = test_variable, model = linear_model) %>% \n select(study_id,x,y,model, model_subclass, response_transformation_description, link_function_reported, mixed_model, sample_size) %>% \n pivot_longer(-study_id, names_to = \"variable_type\", values_to = \"variable_name\",values_transform = as.character) %>% \n left_join(forest_plot_new_labels) %>% \n mutate(variable_name = case_when(is.na(user_friendly_name) ~ variable_name, TRUE ~ user_friendly_name)) %>% \n select(-user_friendly_name) %>% \n pivot_wider(names_from = variable_type, values_from = variable_name) %>% \n mutate(sample_size = 
as.numeric(sample_size))\n\n\nMA_mean_bt <- ManyEcoEvo_viz$model[[1]] %>% \n broom::tidy(conf.int = TRUE) %>% \n rename(study_id = term)\n\nresults_bt <- ManyEcoEvo_viz$model[[1]] %>% \n broom::tidy(conf.int = TRUE, include_studies = TRUE) %>% \n rename(study_id = term) %>% \n semi_join(analytical_choices_bt) %>% \n left_join(analytical_choices_bt)\n\nsamp_size_hist_bt <- specr::plot_samplesizes(results_bt %>% \n rename(fit_nobs = sample_size)) +\n cowplot::theme_half_open() +\n theme(axis.ticks.x = element_blank(), \n axis.text.x = element_blank())\n\ncurve_bt <- specr::plot_curve(results_bt) +\n geom_hline(yintercept = 0, \n linetype = \"dashed\", \n color = \"black\") +\n geom_pointrange(mapping = aes(x = 0, y = estimate, ymin = conf.low, ymax = conf.high ), \n data = MA_mean_bt,\n colour = \"black\", shape = \"diamond\") +\n labs(x = \"\", y = \"Standardized Effect Size Zr\") +\n cowplot::theme_half_open() +\n theme(axis.ticks.x = element_blank(), \n axis.text.x = element_blank())\n\nspecs_bt <- specr::plot_choices(results_bt %>% \n rename(\"Independent\\nVariable\" = x,\n \"Dependent\\nVariable\" = y,\n Model = model,\n \"Model Subclass\" = model_subclass,\n \"Mixed Model\" = mixed_model,\n \"Response\\nTransformation\\nDescription\" = response_transformation_description,\n \"Link Function\" = link_function_reported), \n choices = c(\"Independent\\nVariable\", \n \"Dependent\\nVariable\", \n \"Model\", \n \"Model Subclass\", \n \"Mixed Model\", \n \"Response\\nTransformation\\nDescription\", \n \"Link Function\")) +\n labs(x = \"specifications (ranked)\") +\n theme(strip.text.x = element_blank(),\n strip.text.y = element_text(size = 8, angle = 360, face = \"bold\"),\n axis.ticks.x = element_blank(), \n axis.text.x = element_blank()) \n\n\ncowplot::plot_grid(curve_bt, specs_bt, samp_size_hist_bt,\n ncol = 1,\n align = \"v\",\n rel_heights = c(1.5, 2.2, 0.8),\n axis = \"rbl\",labels = \"AUTO\") \n```\n\n::: {.cell-output-display}\n![**A.** Forest plot for blue 
tit analyses: standardized effect-sizes (circles) and their 95% confidence intervals are displayed for each analysis included in the meta-analysis model. The meta-analytic mean effect-size is denoted by a black diamond, with error bars also representing the 95% confidence interval. The dashed black line demarcates effect sizes of 0, whereby no effect of the test variable on the response variable is found. Blue points where Zr and its associated confidence intervals are greater than 0 indicate analyses that found a negative effect of sibling number on nestling growth. Gray coloured points have confidence intervals crossing 0, indicating no relationship between the test and response variable. Red points indicate the analysis found a positive relationship between sibling number and nestling growth. **B.** Analysis specification plot: for each analysis plotted in A, the corresponding combination of analysis decisions is plotted. Each decision and its alternative choices is grouped into its own facet, with the decision point described on the right of the panel, and the option shown on the left. Lines indicate the option chosen used in the corresponding point in plot A. **C.** Sample sizes of each analysis. Note that empty bars indicate analyst did not report sample size and sample size could not be derived by lead team.](SM1_summary_files/figure-html/fig-specr-bt-1.png){#fig-specr-bt fig-align='center' width=1152}\n:::\n:::\n\n\n\n\n### *Eucalyptus*\n\nIn the *Eucalyptus* specification curve, there are no strong trends (@fig-specr-euc). It is, perhaps, the case that choosing the dependent variable 'count of seedlings 0-0.5m high' corresponds to more positive results and choosing 'count of all *Eucalytpus* seedlings' might find more negative results. 
Choosing the independent variable 'sum of all grass types (with or without non-grass graminoids)' might be associated with more results close to zero consistent with the absence of an effect.\n\n\n\n\n::: {.cell .column-page-right layout-align=\"center\"}\n\n```{.r .cell-code}\nanalytical_choices_euc <- ManyEcoEvo_results$effects_analysis[[2]] %>% \n select(study_id, \n response_transformation_description, # Don't need constructed, as this is accounted for in y\n response_variable_name, \n test_variable, \n Bayesian, \n linear_model,\n model_subclass, \n sample_size, \n starts_with(\"num\"),\n transformation,\n mixed_model,\n link_function_reported) %>% \n mutate(across(starts_with(\"num\"), as.numeric),\n response_transformation_description = case_when(is.na(response_transformation_description) ~ \"None\",\n TRUE ~ response_transformation_description),\n response_variable_name = case_when(response_variable_name == \"average.proportion.of.plots.containing.at.least.one.euc.seedling.of.any.size\" ~ \"mean.prop.plots>=1seedling\",\n TRUE ~ response_variable_name)) %>% \n rename(y = response_variable_name, x = test_variable, model = linear_model) %>% \n select(study_id,x,y,model, model_subclass, response_transformation_description, link_function_reported, mixed_model, sample_size) %>% \n pivot_longer(-study_id, names_to = \"variable_type\", values_to = \"variable_name\",values_transform = as.character) %>% \n left_join(forest_plot_new_labels) %>% \n mutate(variable_name = case_when(is.na(user_friendly_name) ~ variable_name, TRUE ~ user_friendly_name)) %>% \n select(-user_friendly_name) %>% \n pivot_wider(names_from = variable_type, values_from = variable_name, values_fn = list) %>% \n unnest(cols = everything()) %>% #TODO remove unnest and values_fn = list when origin of duplicate entry for R_1LRqq2WHrQaENtM-1-1-1 is identified\n mutate(sample_size = as.numeric(sample_size))\n\n\n\nMA_mean_euc <- ManyEcoEvo_viz %>% \n filter(model_name == \"MA_mod\", publishable_subset 
== \"All\", dataset == \"eucalyptus\", exclusion_set == \"complete\") %>% \n pluck(\"model\", 1) %>% \n broom::tidy(conf.int = TRUE) %>% \n rename(study_id = term) \n\nresults_euc <- ManyEcoEvo_viz %>% \n filter(model_name == \"MA_mod\", publishable_subset == \"All\", dataset == \"eucalyptus\", exclusion_set == \"complete\") %>% \n pluck(\"model\", 1) %>% \n broom::tidy(conf.int = TRUE, include_studies = TRUE) %>% \n rename(study_id = term) %>% \n semi_join(analytical_choices_euc) %>% \n left_join(analytical_choices_euc)\n\nsamp_size_hist_euc <- specr::plot_samplesizes(results_euc %>% rename(fit_nobs = sample_size)) +\n cowplot::theme_half_open() +\n theme(axis.ticks.x = element_blank(), axis.text.x = element_blank())\n\ncurve_euc <- specr::plot_curve(results_euc) +\n geom_hline(yintercept = 0, \n linetype = \"dashed\", \n color = \"black\") +\n geom_pointrange(mapping = aes(x = 0, y = estimate, ymin = conf.low, ymax = conf.high ), \n data = MA_mean_euc,\n colour = \"black\", shape = \"diamond\") +\n labs(x = \"\", y = \"Standardized Effect Size Zr\") +\n cowplot::theme_half_open() +\n theme(axis.ticks.x = element_blank(), \n axis.text.x = element_blank())\n\nspecs_euc <- specr::plot_choices(results_euc %>% \n rename(\"Independent\\nVariable\" = x,\n \"Dependent\\nVariable\" = y,\n Model = model,\n \"Model Subclass\" = model_subclass,\n \"Mixed Model\" = mixed_model,\n \"Response\\nTransformation\\nDescription\" = response_transformation_description,\n \"Link Function\" = link_function_reported), \n choices = c(\"Independent\\nVariable\", \n \"Dependent\\nVariable\", \n \"Model\", \n \"Model Subclass\", \n \"Mixed Model\", \n \"Response\\nTransformation\\nDescription\", \n \"Link Function\")) +\n labs(x = \"specifications (ranked)\") +\n theme(strip.text.x = element_blank(),\n strip.text.y = element_text(size = 8, angle = 360, face = \"bold\"),\n axis.ticks.x = element_blank(), \n axis.text.x = element_blank()) \n\ncowplot::plot_grid(curve_euc, specs_euc, 
samp_size_hist_euc,\n ncol = 1,\n align = \"v\",\n rel_heights = c(1.5, 2.2, 0.8),\n axis = \"rbl\",labels = \"AUTO\") \n```\n\n::: {.cell-output-display}\n![**A.** Forest plot for *Eucalyptus* analyses: standardized effect-sizes (circles) and their 95% confidence intervals are displayed for each analysis included in the meta-analysis model. The meta-analytic mean effect-size is denoted by a black diamond, with error bars also representing the 95% confidence interval. The dashed black line demarcates effect sizes of 0, whereby no effect of the test variable on the response variable is found. Blue points where $Z_r$ and its associated confidence intervals are greater than 0 indicate analyses that found a positive relationship of grass cover on *Eucalyptus* seedling success. Gray coloured points have confidence intervals crossing 0, indicating no relationship between the test and response variable. Red points indicate the analysis found a negative relationship between grass cover and *Eucalyptus seedling success*. **B.** Analysis specification plot: for each analysis plotted in A, the corresponding combination of analysis decisions is plotted. Each decision and its alternative choices is grouped into its own facet, with the decision point described on the right of the panel, and the option shown on the left. Lines indicate the option chosen used in the corresponding point in plot A. **C.** Sample sizes of each analysis. 
Note that empty bars indicate analyst did not report sample size and sample size could not be derived by lead team.](SM1_summary_files/figure-html/fig-specr-euc-1.png){#fig-specr-euc fig-align='center' width=1152}\n:::\n:::\n", "supporting": [ "SM1_summary_files/figure-html" ], diff --git a/_freeze/supp_mat/SM2_EffectSizeAnalysis/execute-results/html.json b/_freeze/supp_mat/SM2_EffectSizeAnalysis/execute-results/html.json index 93a7ce2..d442bbf 100644 --- a/_freeze/supp_mat/SM2_EffectSizeAnalysis/execute-results/html.json +++ b/_freeze/supp_mat/SM2_EffectSizeAnalysis/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "ee97001ea06284bb79e0ae8fd1c2db60", + "hash": "1d0daedd24ea4f4010d3969fdfdaa226", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Effect Size Analysis\"\n# lib-dir: \"renv/library/R-4.4/aarch64-apple-darwin20/\"\nformat: html\neditor: visual\nnumber-sections: true\ncode-fold: true\nexecute:\n freeze: auto # re-render only when source changes\n---\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(withr)\nlibrary(here)\nlibrary(tidyverse)\nlibrary(performance)\nlibrary(broom.mixed)\nlibrary(gt)\nlibrary(lme4)\nlibrary(MuMIn)\nlibrary(ManyEcoEvo)\nlibrary(ggrepel)\nlibrary(glue)\nlibrary(gluedown)\nset.seed(1234)\n```\n:::\n\n::: {.cell eavl='true'}\n\n```{.r .cell-code}\nManyEcoEvo_results <- \n ManyEcoEvo_results %>% \n mutate(effects_analysis = \n map(effects_analysis, \n rename, \n id_col = study_id)) #%>% \n # mutate_at(c(\"data\", \n # \"diversity_data\", \n # \"diversity_indices\", \n # \"effects_analysis\"),\n # .funs = ~ map(.x, anonymise_teams,\n # TeamIdentifier_lookup))\n```\n:::\n\n\n## Meta-analysis\n\n### Effect Sizes $Z_r$\n\n#### Effect of categorical review rating\n\nThe figures below (@fig-euc-cat-ratings-MA,@fig-bt-cat-ratings-MA) shows the fixed effect of categorical review rating on deviation from the meta-analytic mean. There is very little difference in deviation for analyses in any of the review categories. 
It is worth noting that each analysis features multiple times in these figures corresponding to the multiple reviewers that provided ratings.\n\n\n::: {.cell}\n::: {.cell-output-display}\n![Orchard plot of meta-analytic model fitted to all eucalyptus analyses with a fixed effect for categorical peer-review ratings, and random effects for analyst ID and reviewer ID. Black circles denote coefficient mean for each categorical publishability rating. Thick error bars represent 95% confidence intervals and whiskers indicate 95% prediction intervals. Effect sizes are represented by circles and their size corresponds to the precision of the estimate.](SM2_EffectSizeAnalysis_files/figure-html/fig-euc-cat-ratings-MA-1.png){#fig-euc-cat-ratings-MA width=768}\n:::\n:::\n\n::: {.cell}\n::: {.cell-output-display}\n![Orchard plot of meta-analytic model fitted to all blue tit analyses with a fixed effect for categorical peer-review ratings, and random effects for analyst ID and reviewer ID. Black circles denote coefficient mean for each categorical publishability rating. Thick error bars represent 95% confidence intervals and whiskers indicate 95% prediction intervals. Effect sizes are represented by circles and their size corresponds to the precision of the estimate.](SM2_EffectSizeAnalysis_files/figure-html/fig-bt-cat-ratings-MA-1.png){#fig-bt-cat-ratings-MA width=768}\n:::\n:::\n\n\n#### Post-hoc analysis: Exploring the effect of removing analyses with poor peer-review ratings on heterogeneity\n\nThe forest plots in @fig-all-forest-plots-Zr compare the distributions of $Z_r$ effects from our full set of analyses with the distributions of $Z_r$ effects from our post-hoc analyses, which removed either analyses that were reviewed at least once as being 'unpublishable', and analyses that were reviewed at least once as being 'unpublishable' or requiring 'major revisions'. Removing these analyses from the blue tit data had little impact on the overall distribution of the results. 
When 'unpublishable' analyses of the *Eucalyptus* dataset were removed, the extreme outlier 'Brooklyn-2-2-1' was also removed, resulting in a substantial difference to the amount of observed deviation from the meta-analytic mean.\n\n\n::: {.cell .column-body-outset-right}\n::: {.cell-output-display}\n![Forest plots of meta-analytic estimated standardized effect sizes ($Z_r$, blue circles) and their 95% confidence intervals for each effect size included in the meta-analysis model. The meta-analytic mean effect size is denoted by a black triangle and a dashed vertical line, with error bars also representing the 95% confidence interval. The solid black vertical line demarcates effect size of 0, indicating no relationship between the test variable and the response variable. The left side of each panel shows the analysis team names (anonymous arbitrary names assigned by us), each followed by three numbers. The first number is the submission ID (some analyst teams submitted results to us on >1 submission form), the second number is the analysis ID (some analyst teams included results of >1 analysis in a given submission), and the third number is the effect ID (some analysts submitted values for >1 effect per analysis). Thus, each row in each forest plot is uniquely identified, but it is possible to determine which effects come from which analyses and which analysis teams. The plots in the top row depict effects from analyses of blue tit data, and the bottom row plots depict effects from analyses of Eucalyptus data. The right-most plots depict all usable effect sizes. 
The plots on the left exclude effects from analysis sets that received at least one rating of “unpublishable” from peer reviewers, and the plots in the middle exclude effects from analysis sets that received at least one rating of either “unpublishable” or “major revision” from peer reviewers.](SM2_EffectSizeAnalysis_files/figure-html/fig-all-forest-plots-Zr-1.png){#fig-all-forest-plots-Zr width=960}\n:::\n:::\n\n\n#### Post-hoc analysis: Exploring the effect of excluding estimates in which we had reduced confidence\n\nFor each dataset (blue tit, Eucalyptus), we created a second, more conservative version, that excluded effects based on estimates of $df$ that we considered less reliable (@tbl-Zr-exclusion-subsetting). We compared the outcomes of analyses of the primary dataset (constituted according to our registered plan) with the outcomes of analyses of the more conservative version of the dataset. We also compared results from analyses of both of these versions of the dataset to versions with our post-hoc removal of outliers described in the main text. 
Our more conservative exclusions (based on unreliable estimates of $df$) had minimal impact on the meta-analytic mean for both blue tit and Eucalyptus analyses, regardless of whether outliers were excluded (@tbl-Zr-exclusion-subsetting).\n\n\n::: {#tbl-Zr-exclusion-subsetting .cell tbl-cap='Estimated meta-analytic mean, standard error, and 95% confidence intervals, from analyses of the primary data set, the more conservative version of the dataset which excluded effects based on less reliable estimates of $df$, and both of these datasets with outliers removed.'}\n\n```{.r .cell-code}\nManyEcoEvo_viz %>% \n dplyr::filter(estimate_type == \"Zr\", \n model_name == \"MA_mod\", \n collinearity_subset != \"collinearity_removed\") %>% \n hoist(tidy_mod_summary) %>% \n unnest(tidy_mod_summary) %>% \n filter(publishable_subset == \"All\", expertise_subset == \"All\") %>% \n select(-publishable_subset, -expertise_subset) %>% \n select(dataset, \n exclusion_set, \n estimate, \n std.error, \n statistic, \n p.value, \n starts_with(\"conf\")) %>% \n mutate(exclusion_set = \n case_when(exclusion_set == \"complete\" ~ \n \"Primary dataset\",\n exclusion_set == \"complete-rm_outliers\" ~ \n \"Primary dataset, outliers removed\",\n exclusion_set == \"partial\" ~ \n \"Conservative exclusions\",\n TRUE ~ \"Conservative exclusions, outliers removed\")) %>% \ngroup_by(exclusion_set) %>% \n gt::gt() %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::fmt(columns = \"p.value\",\n fns = function(x) gtsummary::style_pvalue(x, prepend_p = FALSE)) %>% \n gt::fmt_number(columns = c(-p.value, -dataset)) %>% \n gt::cols_label(estimate = gt::md(\"$$\\\\hat\\\\mu$$\"), \n std.error = gt::md(\"$$SE[\\\\hat\\\\mu]$$\"),\n conf.low = gt::md(\"95\\\\%CI\")) %>% \n gt::cols_merge(columns = starts_with(\"conf\"), \n pattern = \"[{1},{2}]\") %>% \n gt::cols_move(columns = conf.low, after = std.error) %>% \n gt::tab_style(\n style = list(gt::cell_text(transform = \"capitalize\"), \n 
gt::cell_text(style = \"italic\")),\n locations = gt::cells_body(columns = \"dataset\", rows = dataset == \"eucalyptus\")\n ) \n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n
dataset$$\\hat\\mu$$$$SE[\\hat\\mu]$$95%CIstatisticp.value
Primary dataset
blue tit−0.350.03[−0.41,−0.29]−11.02<0.001
eucalyptus−0.090.06[−0.22,0.03]−1.470.14
Conservative exclusions
blue tit−0.360.03[−0.42,−0.29]−10.77<0.001
eucalyptus−0.110.07[−0.24,0.03]−1.550.12
Primary dataset, outliers removed
blue tit−0.360.03[−0.42,−0.30]−11.48<0.001
eucalyptus−0.030.01[−0.06,0.00]−2.230.026
Conservative exclusions, outliers removed
blue tit−0.360.03[−0.43,−0.30]−11.38<0.001
eucalyptus−0.040.02[−0.07,−0.01]−2.520.012
\n
\n```\n\n:::\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nplot_forest <- function(data, intercept = TRUE, MA_mean = TRUE ){\n if (MA_mean == FALSE) {\n data <- filter(data, term != \"Overall\")\n }\n \n p <- ggplot(data, aes(y = term, \n x = estimate, \n ymin = conf.low, \n ymax = conf.high,\n # shape = point_shape,\n colour = parameter_type)) +\n geom_pointrange() +\n ggforestplot::theme_forest() +\n theme(axis.line = element_line(size = 0.10, colour = \"black\"),\n axis.line.y = element_blank(),\n text = element_text(family = \"Helvetica\"),\n axis.text.y = element_blank()) +\n guides(shape = \"none\", colour = \"none\") +\n coord_flip() +\n labs(y = \"Standardised Effect Size, Zr\",\n x = element_blank()) +\n scale_x_continuous(breaks = c(-4,-3,-2,-1,0,1),\n minor_breaks = seq(from = -4.5, to = 1.5, by = 0.5)) +\n NatParksPalettes::scale_color_natparks_d(\"Glacier\")\n \n if (intercept == TRUE) {\n p <- p + geom_hline(yintercept = 0)\n }\n if (MA_mean == TRUE) {\n p <- p + geom_hline(aes(yintercept = meta_analytic_mean), \n data = data,\n colour = \"#01353D\", \n linetype = \"dashed\")\n }\n \n return(p)\n}\n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\ncomplete_euc_data <- \n ManyEcoEvo_viz %>% \n filter(exclusion_set == \"complete\", \n estimate_type == \"Zr\", \n model_name == \"MA_mod\",\n dataset == \"eucalyptus\",\n publishable_subset == \"All\") %>% \n select(model) %>% \n mutate(plot_data = map(model, \n .f = ~ broom::tidy(.x, \n conf.int = TRUE, \n include_studies = TRUE) %>% \n dplyr::mutate(point_shape = \n ifelse(stringr::str_detect(term, \"overall\"), \n \"diamond\", \n \"circle\"),\n Parameter = \n forcats::fct_reorder(term, \n estimate) %>% \n forcats::fct_reorder(., \n point_shape,\n .desc = TRUE))\n ),\n meta_analytic_mean = map_dbl(plot_data, \n ~ filter(.x, Parameter == \"overall\") %>% \n pull(estimate))) %>% \n select(plot_data, meta_analytic_mean) %>% \n unnest(cols = c(\"plot_data\")) %>% \n mutate(parameter_type = 
case_when(str_detect(Parameter, \"overall\") ~ \"mean\",\n TRUE ~ \"study\"))\n\n# complete_euc_data <- \n# complete_euc_data %>% \n# rename(id_col = term) %>% \n# group_by(type) %>% \n# group_split() %>% \n# set_names(., complete_euc_data$type %>% unique) %>% \n# # map_if(.x = ., names(.) == \"study\",\n# # .f = ~ anonymise_teams(.x, TeamIdentifier_lookup)) %>% \n# bind_rows() %>% \n# rename(term = id_col)\n\nmin_outlier_euc <- complete_euc_data %>% \n filter(type == \"study\") %>% \n slice_min(estimate, n = 3) %>% \n pull(term)\n\nsample_size_euc_Zr <- ManyEcoEvo_results %>% \n filter(exclusion_set == \"complete\", dataset == \"eucalyptus\") %>% \n pluck(\"data\", 1) %>% \n select(id_col, sample_size) %>% \n rename(term = id_col) %>% \n mutate(sample_size = as.numeric(sample_size))\n\nmean_n_euc_Zr <- sample_size_euc_Zr %>% \n drop_na(sample_size) %>% \n pull(sample_size) %>% \n mean() %>% \n round(2)\n\nN_outliers_Zr_euc <- sample_size_euc_Zr %>% \n filter(term %in% min_outlier_euc) %>% \n arrange(desc(sample_size))\n```\n:::\n\n\n#### Post-hoc analysis: Exploring the effect of including only analyses conducted by analysis teams with at least one member self-rated as \"highly proficient\" or \"expert\" in conducting statitistical analyses in their research area\n\nThe anonymous Team Identifiers in the reduced subset of \"expert\" or \"highly proficient\" analysts are exported internally in the `ManyEcoEvo` package as `ManyEcoEvo:::expert_subset`. 
Analyses from the following teams are retained in the reduced subset: _Bell_, _Berr_, _Brim_, _Bruc_, _Burr_, _Byng_, _Cape_, _Clar_, _Clev_, _Alban_, _Alpha_, _Bargo_, _Berry_, _Bowen_, _Bulli_, _Aldgat_, _Alding_, _Anakie_, _Aramac_, _August_, _Bamaga_, _Barham_, _Barmah_, _Batlow_, _Beltan_, _Bethan_, _Beulah_, _Bindoo_, _Boonah_, _Bowral_, _Bright_, _Buchan_, _Burnie_, _Cairns_, _Casino_, _Cattai_, _Adelong_, _Angasto_, _Antwerp_, _Arltung_, _Ashford_, _Babinda_, _Bargara_, _Barooga_, _Barraba_, _Belmont_, _Bemboka_, _Benalla_, _Bendigo_, _Berrima_, _Berwick_, _Beverle_, _Bicheno_, _Biloela_, _Birchip_, _Bombala_, _Bonalbo_, _Brookto_, _Bruthen_, _Buderim_, _Candelo_, _Capella_, _Carcoar_, _Carnama_, _Chewton_, _Anglesea_, _Ardrossa_, _Armidale_, _Atherton_, _Balaklav_, _Ballarat_, _Barellan_, _Belgrave_, _Berrigan_, _Binalong_, _Binnaway_, _Blackall_, _Boggabri_, _Bridport_, _Brooklyn_, _Buckland_, _Bundeena_, _Bungonia_, _Busselto_, _Calliope_, _Cardwell_, _Cassilis_, _Cessnock_, _Charlton_.\n\n\n::: {#fig-forest-plot-expertise .cell}\n\n```{.r .cell-code}\nplot_forest <- function(data, intercept = TRUE, MA_mean = TRUE){\n if (MA_mean == FALSE){\n data <- filter(data, Parameter != \"overall\")\n }\n \n p <- ggplot(data, aes(y = estimate, \n x = term, \n ymin = conf.low, \n ymax = conf.high,\n shape = parameter_type,\n colour = parameter_type)) +\n geom_pointrange(fatten = 2) +\n ggforestplot::theme_forest() +\n theme(axis.line = element_line(linewidth = 0.10, colour = \"black\"),\n axis.line.y = element_blank(),\n text = element_text(family = \"Helvetica\")#,\n # axis.text.y = element_blank()\n ) +\n guides(shape = guide_legend(title = NULL), \n colour = guide_legend(title = NULL)) +\n coord_flip() +\n ylab(bquote(Standardised~Effect~Size~Z[r])) +\n xlab(element_blank()) +\n # scale_y_continuous(breaks = c(-4,-3,-2,-1,0,1),\n # minor_breaks = seq(from = -4.5, to = 1.5, by = 0.5)) +\n NatParksPalettes::scale_color_natparks_d(\"Glacier\")\n \n if(intercept == 
TRUE){\n p <- p + geom_hline(yintercept = 0)\n }\n if(MA_mean == TRUE){\n p <- p + geom_hline(aes(yintercept = meta_analytic_mean), \n data = data,\n colour = \"#01353D\", \n linetype = \"dashed\")\n }\n \n return(p)\n}\n\nfilter_experts <- \n rlang::exprs(\n exclusion_set == \"complete\", \n estimate_type == \"Zr\", \n model_name == \"MA_mod\",\n publishable_subset == \"All\", \n expertise_subset == \"expert\")\n\nbt_experts_only <- \n ManyEcoEvo_viz %>% \n filter(!!!filter_experts, \n dataset == \"blue tit\") %>% \n select(model) %>% \n mutate(plot_data = map(model, \n .f = ~ broom::tidy(.x, \n conf.int = TRUE, \n include_studies = TRUE)%>% \n dplyr::mutate(point_shape = \n ifelse(stringr::str_detect(term, \"overall\"), \n \"diamond\", \n \"circle\"),\n Parameter = \n forcats::fct_reorder(term, \n estimate) %>% \n forcats::fct_reorder(., \n point_shape,\n .desc = TRUE))\n ),\n meta_analytic_mean = map_dbl(plot_data, \n ~ filter(.x, Parameter == \"overall\") %>% \n pull(estimate))) %>% \n select(plot_data, meta_analytic_mean) %>% \n unnest(cols = c(\"plot_data\")) %>% \n mutate(parameter_type = case_when(str_detect(Parameter, \"overall\") ~ \"mean\",\n TRUE ~ \"study\")) \n\n# bt_experts_only <- \n# bt_experts_only %>% \n# rename(id_col = term) %>% \n# group_by(type) %>% \n# group_split() %>% \n# set_names(., bt_experts_only$type %>% unique) %>% \n# # map_if(.x = ., names(.) 
== \"study\",\n# # .f = ~ anonymise_teams(.x, TeamIdentifier_lookup)) %>% \n# bind_rows() %>% \n# rename(term = id_col)\n\nbt_forest_experts <- bt_experts_only %>% \n arrange(desc(type)) %>% \n mutate(type = forcats::as_factor(type)) %>% \n group_by(type) %>% \n arrange(desc(estimate),.by_group = TRUE) %>% \n mutate(term = forcats::as_factor(term),\n point_shape = case_when(str_detect(type, \"summary\") ~ \"mean\",\n TRUE ~ \"study\")) %>% \n plot_forest(intercept = TRUE, MA_mean = TRUE) +\n theme(axis.text.x = element_text(size = 15), \n axis.title.x = element_text(size = 15),\n axis.text.y = element_blank()\n ) +\n scale_y_continuous(limits = c(-1.6, 0.65)) \n\neuc_experts_only <- \n ManyEcoEvo_viz %>% \n filter(!!!filter_experts, \n dataset == \"eucalyptus\") %>% \n select(model) %>% \n mutate(plot_data = map(model, \n .f = ~ broom::tidy(.x, \n conf.int = TRUE, \n include_studies = TRUE) %>% \n dplyr::mutate(point_shape = \n ifelse(stringr::str_detect(term, \"overall\"), \n \"diamond\", \n \"circle\"),\n Parameter = \n forcats::fct_reorder(term, \n estimate) %>% \n forcats::fct_reorder(., \n point_shape,\n .desc = TRUE))\n ),\n meta_analytic_mean = map_dbl(plot_data, \n ~ filter(.x, Parameter == \"overall\") %>% \n pull(estimate))) %>% \n select(plot_data, meta_analytic_mean) %>% \n unnest(cols = c(\"plot_data\")) %>% \n mutate(parameter_type = case_when(str_detect(Parameter, \"overall\") ~ \"mean\",\n TRUE ~ \"study\"))\n\n# euc_experts_only <- \n# euc_experts_only %>% \n# rename(id_col = term) %>% \n# group_by(type) %>% \n# group_split() %>% \n# set_names(., euc_experts_only$type %>% unique) %>% \n# # map_if(.x = ., names(.) 
== \"study\",\n# # .f = ~ anonymise_teams(.x, TeamIdentifier_lookup)) %>% \n# bind_rows() %>% \n# rename(term = id_col)\n\neuc_forest_experts <- euc_experts_only %>% \n arrange(desc(type)) %>% \n mutate(type = forcats::as_factor(type)) %>% \n group_by(type) %>% \n arrange(desc(estimate),.by_group = TRUE) %>% \n mutate(term = forcats::as_factor(term),\n point_shape = case_when(str_detect(type, \"summary\") ~ \"mean\",\n TRUE ~ \"study\")) %>% \n plot_forest(intercept = TRUE, MA_mean = TRUE) +\n theme(axis.text.x = element_text(size = 15), \n axis.title.x = element_text(size = 15),\n axis.text.y = element_blank()\n ) +\n scale_y_continuous(limits = c(-5, 1), \n breaks = c(-5, -4, -3, -2, -1, 0, 1) )\n\n# ---- Extract Viz ----\n\nbt_forest_experts\n\neuc_forest_experts\n```\n\n::: {.cell-output-display}\n![Blue tit dataset analyses](SM2_EffectSizeAnalysis_files/figure-html/fig-forest-plot-expertise-1.png){#fig-forest-plot-expertise-1 width=672}\n:::\n\n::: {.cell-output-display}\n![*Eucalyptus* dataset analyses](SM2_EffectSizeAnalysis_files/figure-html/fig-forest-plot-expertise-2.png){#fig-forest-plot-expertise-2 width=672}\n:::\n\nEstimated meta-analytic mean effect size ($Z_r$), standard error, and 95% confidence intervals, from analyses of the primary data set with at least one member self-rated as \"highly proficient\" or \"expert\" in conducting statistical analyses in their research area.\n:::\n\n\n#### Post-hoc analysis: Exploring the effect of excluding analyses of the blue tit dataset containing highly collinear predictor variables\n\nFor the blue tit dataset, we created a subset of analyses that excluded effects based on analyses containing highly correlated predictor variables. Excluded analyses are exported internally in the `ManyEcoEvo` package as `ManyEcoEvo::collinearity_subset`. 
Analyses with the following identifiers are excluded in the reduced subset: _Armadal-1-1-1_, _Babinda-1-1-1_, _Babinda-2-2-1_, _Barham-1-1-1_, _Barham-2-2-1_, _Bega-1-1-1_, _Bega-1-1-2_, _Bega-2-2-1_, _Bega-2-2-2_, _Borde-1-1-1_, _Bruc-1-1-1_, _Caigun-1-1-1_, _Caigun-2-2-1_, _Adelong-1-1-1_, _Adelong-2-2-1_.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfilter_collinear <- rlang::exprs(exclusion_set == \"complete\", \n publishable_subset == \"All\", \n expertise_subset == \"All\", \n collinearity_subset == \"collinearity_removed\",\n model_name == \"MA_mod\",\n dataset == \"blue tit\")\n\n# summary_output_params <- rlang::exprs(tidy_mod_summary, MA_fit_stats, mod_fit_stats)\n\nManyEcoEvo_viz %>% \n filter(!!!filter_collinear) %>% \n mutate(plot_data = map(model, \n .f = ~ broom::tidy(.x, \n conf.int = TRUE, \n include_studies = TRUE)%>% \n dplyr::mutate(point_shape = \n ifelse(stringr::str_detect(term, \"overall\"), \n \"diamond\", \n \"circle\"),\n Parameter = \n forcats::fct_reorder(term, \n estimate) %>% \n forcats::fct_reorder(., \n point_shape,\n .desc = TRUE))\n ),\n meta_analytic_mean = map_dbl(plot_data, \n ~ filter(.x, Parameter == \"overall\") %>% \n pull(estimate))) %>% \n select(plot_data, meta_analytic_mean) %>% \n unnest(cols = c(\"plot_data\")) %>% \n mutate(parameter_type = case_when(str_detect(Parameter, \"overall\") ~ \"mean\",\n TRUE ~ \"study\")) %>% \n arrange(desc(type)) %>% \n mutate(type = forcats::as_factor(type)) %>% \n group_by(type) %>% \n arrange(desc(estimate),.by_group = TRUE) %>% \n mutate(term = forcats::as_factor(term),\n point_shape = case_when(str_detect(type, \"summary\") ~ \"mean\",\n TRUE ~ \"study\")) %>% \n plot_forest(intercept = TRUE, MA_mean = TRUE) +\n theme(axis.text.x = element_text(size = 15), \n axis.title.x = element_text(size = 15),\n axis.text.y = element_blank()\n ) +\n scale_y_continuous(limits = c(-1.5, 0.5), \n breaks = c(-1.5, -1, -0.5, 0, 0.5) )\n```\n\n::: {.cell-output-display}\n![Forest plot of meta-analytic 
estimated effect-sizes $Z_{r}$, standard error and 95% confidence intervals of Blue tit analyses with highly collinear analyses removed. The meta-analytic mean for the reduced subset is denoted by the black triangle, and a dashed vertical line, with error bars representing the 95% confidence interval. The solid black vertical line demarcates effect size of 0.](SM2_EffectSizeAnalysis_files/figure-html/fig-forest-plot-Zr-collinear-rm-subset-1.png){#fig-forest-plot-Zr-collinear-rm-subset width=672}\n:::\n:::\n\n\n### Out of sample predictions $y_i$\n\n#### Non-truncated $y_{i}$ meta-analysis forest plot\n\nBelow is the non-truncated version of @fig-euc-yi-forest-plot showing a forest plot of the out-of-sample predictions, $y_{i}$, on the response-scale (stems counts), for *Eucalyptus* analyses, showing the full error bars of all model estimates.\n\n\n::: {.cell}\n\n```{.r .cell-code}\nplot_forest_2 <- function(data, intercept = TRUE, MA_mean = TRUE, y_zoom = numeric(2L)){\n if(MA_mean == FALSE){\n data <- filter(data, study_id != \"overall\")\n }\n \n plot_data <- data %>% \n group_by(study_id) %>% \n group_nest() %>% \n hoist(data, \"estimate\",.remove = FALSE) %>% \n hoist(estimate, y50 = 2) %>% \n select(-estimate) %>% \n unnest(data) %>% \n arrange(desc(type)) %>% \n mutate(type = forcats::as_factor(type)) %>% \n group_by(type) %>% \n arrange(desc(y50),.by_group = TRUE) %>% \n mutate(study_id = forcats::as_factor(study_id),\n point_shape = case_when(str_detect(type, \"summary\") ~ \"diamond\",\n TRUE ~ \"circle\"))\n \n p <- ggplot(plot_data, aes(y = estimate, \n x = study_id,\n ymin = conf.low, \n ymax = conf.high,\n # shape = type,\n shape = point_shape,\n colour = estimate_type\n )) +\n geom_pointrange(position = position_dodge(width = 0.5)) +\n ggforestplot::theme_forest() +\n theme(axis.line = element_line(linewidth = 0.10, colour = \"black\"),\n axis.line.y = element_blank(),\n text = element_text(family = \"Helvetica\")) +\n guides(shape = \"none\", colour 
= \"none\") +\n coord_flip(ylim = y_zoom) +\n labs(y = \"Model estimated out of sample predictions, stem counts\",\n x = element_blank()) +\n scale_y_continuous(breaks = scales::breaks_extended(10)) +\n NatParksPalettes::scale_color_natparks_d(\"Glacier\") \n \n if(intercept == TRUE){\n p <- p + geom_hline(yintercept = 0)\n }\n if(MA_mean == TRUE){\n p <- p +\n geom_hline(aes(yintercept = plot_data %>%\n filter(type == \"summary\", estimate_type == \"y25\") %>%\n pluck(\"estimate\")),\n data = data,\n colour = \"#01353D\",\n linetype = \"dashed\") +\n geom_hline(aes(yintercept = plot_data %>%\n filter(type == \"summary\", estimate_type == \"y50\") %>%\n pluck(\"estimate\")),\n data = data,\n colour = \"#088096\",\n linetype = \"dashed\") +\n geom_hline(aes(yintercept = plot_data %>%\n filter(type == \"summary\", estimate_type == \"y75\") %>%\n pluck(\"estimate\")),\n data = data,\n colour = \"#58B3C7\" ,\n linetype = \"dashed\")\n }\n \n print(p)\n}\n\n# TODO put into R/ and build into package to call!\nfit_MA_mv <- function(effects_analysis, Z_colname, VZ_colname, estimate_type){\n Zr <- effects_analysis %>% pull({{Z_colname}})\n VZr <- effects_analysis %>% pull({{VZ_colname}})\n mod <- ManyEcoEvo::fit_metafor_mv(estimate = Zr, \n variance = VZr, \n estimate_type = estimate_type, \n data = effects_analysis)\n return(mod)\n}\n\nback_transformed_predictions <- \n ManyEcoEvo_yi %>% \n dplyr::mutate(data = \n purrr::map(data, \n ~ dplyr::filter(.x,\n stringr::str_detect(response_variable_type, \"constructed\", negate = TRUE)))) %>% \n prepare_response_variables_yi(estimate_type = \"yi\",\n param_table = ManyEcoEvo:::analysis_data_param_tables) %>% \n generate_yi_subsets()\n\n\nraw_mod_data_logged <- \n back_transformed_predictions %>% \n filter(dataset == \"eucalyptus\") %>%\n group_by(estimate_type) %>% \n select(estimate_type, data) %>% \n unnest(data) %>% \n rename(study_id = id_col) %>% \n hoist(params, param_mean = list(\"value\", 1), param_sd = list(\"value\", 
2)) %>% \n rowwise() %>% \n mutate(exclusion_threshold = param_mean + 3*param_sd) %>% \n filter(fit < exclusion_threshold) %>% \n mutate(log_vals = map2(fit, se.fit, log_transform, 1000)) %>% \n unnest(log_vals) %>%\n select(study_id, \n TeamIdentifier,\n estimate_type, \n starts_with(\"response_\"), \n -response_id_S2, \n ends_with(\"_log\")) %>% \n group_by(estimate_type) %>% \n nest()\n \n\nmod_data_logged <- raw_mod_data_logged %>% \n mutate(MA_mod = \n map(data, \n ~fit_MA_mv(.x, mean_log, std.error_log, \"yi\")))\n\n\nplot_data_logged <- mod_data_logged %>% \n mutate(tidy_mod = map(.x = MA_mod,\n ~broom::tidy(.x,\n conf.int = TRUE, \n include_studies = TRUE) %>% \n rename(study_id = term))) %>% \n select(tidy_mod) %>% \n unnest(cols = c(tidy_mod)) \n\nplot_data_logged %>% \n mutate(response_scale = map2(estimate, std.error, log_back, 1000)) %>% \n select(estimate_type, study_id, type, response_scale) %>% \n unnest(response_scale) %>% \n rename(estimate = mean_origin, conf.low = lower, conf.high = upper) %>% \n# filter(estimate <1000) %>% \n plot_forest_2(MA_mean = T,y_zoom = c(0,140))\n```\n\n::: {.cell-output-display}\n![Forest plot of meta-analytic estimated out of sample predictions, $y_{i}$, on the response-scale (stems counts), for *Eucalyptus* analyses. Circles represent individual analysis estimates. Triangles represent the meta-analytic mean for each prediction scenario. Navy blue coloured points correspond to $y_{25}$ scenario, blue coloured points correspond to the $y_{50}$ scenario, while light blue points correspond to the $y_{75}$ scenario. Error bars are 95% confidence intervals. 
Outliers (observations more than 3SD above the mean) have been removed prior to model fitting.](SM2_EffectSizeAnalysis_files/figure-html/fig-euc-yi-forest-plot-full-1.png){#fig-euc-yi-forest-plot-full width=672}\n:::\n:::\n", + "markdown": "---\ntitle: \"Effect Size Analysis\"\n# lib-dir: \"renv/library/R-4.4/aarch64-apple-darwin20/\"\nformat: \n html:\n code-fold: true\n echo: true\neditor: visual\nnumber-sections: true\npre-render: \"utils.R\"\nexecute:\n freeze: auto # re-render only when source changes\n---\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(withr)\nlibrary(here)\nlibrary(tidyverse)\nlibrary(performance)\nlibrary(broom.mixed)\nlibrary(gt)\nlibrary(gtExtras)\nlibrary(lme4)\nlibrary(MuMIn)\nlibrary(ManyEcoEvo)\nlibrary(ggrepel)\nlibrary(glue)\nlibrary(gluedown)\nset.seed(1234)\nsource(here::here(\"utils.R\"))\n```\n:::\n\n::: {.cell eavl='true'}\n\n```{.r .cell-code}\nManyEcoEvo_results <- \n ManyEcoEvo_results %>% \n mutate(effects_analysis = \n map(effects_analysis, \n rename, \n id_col = study_id)) #%>% \n # mutate_at(c(\"data\", \n # \"diversity_data\", \n # \"diversity_indices\", \n # \"effects_analysis\"),\n # .funs = ~ map(.x, anonymise_teams,\n # TeamIdentifier_lookup))\n```\n:::\n\n\n\n\n## Meta-analysis\n\n### Effect Sizes $Z_r$\n\n#### Effect of categorical review rating\n\nThe figures below (@fig-euc-cat-ratings-MA, @fig-bt-cat-ratings-MA) hows the fixed effect of categorical review rating on deviation from the meta-analytic mean. There is very little difference in deviation for analyses in any of the review categories. 
It is worth noting that each analysis features multiple times in these figures corresponding to the multiple reviewers that provided ratings.\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\norchard_publishability <- function(dat){\n rma_mod_rating <- \n metafor::rma.mv(yi = Zr, \n V = VZr, \n data = dat, \n control = list(maxiter = 1000),mods = ~ PublishableAsIs,\n sparse = TRUE,\n random = list(~1|response_id, ~1|ReviewerId)) \n \n orchaRd::orchard_plot(rma_mod_rating, \n mod = \"PublishableAsIs\", \n group = \"id_col\", \n xlab = \"Standardised Correlation Coefficient (Zr)\",\n cb = TRUE,angle = 45) \n}\n\nManyEcoEvo_results$effects_analysis[[2]] %>% \n filter(Zr > -4) %>% \n unnest(review_data) %>% \n select(Zr, VZr, id_col, PublishableAsIs, ReviewerId, response_id) %>% \n mutate(PublishableAsIs = forcats::as_factor(PublishableAsIs) %>% \n forcats::fct_relevel(c(\"deeply flawed and unpublishable\", \n \"publishable with major revision\", \n \"publishable with minor revision\", \n \"publishable as is\" ))) %>% \n orchard_publishability() +\n theme(text = element_text(size = 20),axis.text.y = element_text(size = 20)) +\n scale_x_discrete(labels=c(\"Deeply Flawed\\n & Unpublishable\", \"Publishable With\\n Major Revision\", \"Publishable With\\n Minor Revision\", \"Publishable\\n As Is\"))\n```\n\n::: {.cell-output-display}\n![Orchard plot of meta-analytic model fitted to all *Eucalyptus* analyses with a fixed effect for categorical peer-review ratings, and random effects for analyst ID and reviewer ID. Black circles denote coefficient mean for each categorical publishability rating. Thick error bars represent 95% confidence intervals and whiskers indicate 95% prediction intervals. 
Effect sizes are represented by circles and their size corresponds to the precision of the estimate.](SM2_EffectSizeAnalysis_files/figure-html/fig-euc-cat-ratings-MA-1.png){#fig-euc-cat-ratings-MA width=768}\n:::\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nManyEcoEvo_results$effects_analysis[[1]] %>% \n# filter(Zr > -4) %>% \n unnest(review_data) %>% \n select(Zr, VZr, id_col, PublishableAsIs, ReviewerId, response_id) %>% \n mutate(PublishableAsIs = forcats::as_factor(PublishableAsIs) %>% \n forcats::fct_relevel(c(\"deeply flawed and unpublishable\", \n \"publishable with major revision\", \n \"publishable with minor revision\", \n \"publishable as is\" ))) %>% \n orchard_publishability() +\n theme(text = element_text(size = 20),axis.text.y = element_text(size = 20)) +\n scale_x_discrete(labels=c(\"Deeply Flawed\\n & Unpublishable\", \"Publishable With\\n Major Revision\", \"Publishable With\\n Minor Revision\", \"Publishable\\n As Is\"))\n```\n\n::: {.cell-output-display}\n![Orchard plot of meta-analytic model fitted to all blue tit analyses with a fixed effect for categorical peer-review ratings, and random effects for analyst ID and reviewer ID. Black circles denote coefficient mean for each categorical publishability rating. Thick error bars represent 95% confidence intervals and whiskers indicate 95% prediction intervals. 
Effect sizes are represented by circles and their size corresponds to the precision of the estimate.](SM2_EffectSizeAnalysis_files/figure-html/fig-bt-cat-ratings-MA-1.png){#fig-bt-cat-ratings-MA width=768}\n:::\n:::\n\n\n\n\n#### Post-hoc analysis: Exploring the effect of removing analyses with poor peer-review ratings on heterogeneity\n\nThe forest plots in @fig-all-forest-plots-Zr compare the distributions of $Z_r$ effects from our full set of analyses with the distributions of $Z_r$ effects from our post-hoc analyses, which removed either analyses that were reviewed at least once as being 'unpublishable', and analyses that were reviewed at least once as being 'unpublishable' or requiring 'major revisions'. Removing these analyses from the blue tit data had little impact on the overall distribution of the results. When 'unpublishable' analyses of the *Eucalyptus* dataset were removed, the extreme outlier 'Brooklyn-2-2-1' was also removed, resulting in a substantial difference to the amount of observed deviation from the meta-analytic mean.\n\n\n\n\n::: {.cell .column-body-outset-right}\n\n```{.r .cell-code}\n# TeamIdentifier_lookup <- read_csv(here::here(\"data-raw/metadata_and_key_data/TeamIdentifierAnonymised.csv\"))\n\nplot_forest <- function(data, intercept = TRUE, MA_mean = TRUE){\n if (MA_mean == FALSE){\n data <- filter(data, Parameter != \"overall\")\n }\n \n p <- ggplot(data, aes(y = estimate, \n x = term, \n ymin = conf.low, \n ymax = conf.high,\n shape = point_shape,\n colour = parameter_type)) +\n geom_pointrange(fatten = 2) +\n ggforestplot::theme_forest() +\n theme(axis.line = element_line(linewidth = 0.10, \n colour = \"black\"),\n axis.line.y = element_blank(),\n text = element_text(family = \"Helvetica\")#,\n # axis.text.y = element_blank()\n ) +\n guides(shape = \"none\", colour = \"none\") +\n coord_flip() +\n ylab(bquote(Standardised~Effect~Size~Z[r])) +\n xlab(element_blank()) +\n # scale_y_continuous(breaks = c(-4,-3,-2,-1,0,1),\n # 
minor_breaks = seq(from = -4.5, to = 1.5, by = 0.5)) +\n NatParksPalettes::scale_color_natparks_d(\"Glacier\")\n \n if(intercept == TRUE){\n p <- p + geom_hline(yintercept = 0)\n }\n if(MA_mean == TRUE){\n p <- p + geom_hline(aes(yintercept = meta_analytic_mean), \n data = data,\n colour = \"#01353D\", \n linetype = \"dashed\")\n }\n \n return(p)\n}\n\npublishable_subsets_forest_data <- \n ManyEcoEvo_viz %>% \n filter(model_name == \"MA_mod\",\n exclusion_set == \"complete\",\n expertise_subset == \"All\") %>% \n select(ends_with(\"set\"), model, -expertise_subset) %>% \n mutate(plot_data = \n map(model, \n .f = \n ~broom::tidy(.x, \n conf.int = TRUE, \n include_studies = TRUE) %>% \n mutate(Parameter = \n forcats::fct_reorder(term, estimate))),\n meta_analytic_mean = \n map_dbl(plot_data, \n ~ filter(.x, \n Parameter == \"overall\") %>% \n pull(estimate))) %>% \n select(dataset, \n publishable_subset, \n plot_data, \n meta_analytic_mean) %>% \n unnest(cols = c(\"plot_data\")) %>% \n mutate(parameter_type = \n case_when(\n str_detect(Parameter, \"overall\") ~ \"mean\",\n TRUE ~ \"study\")) %>% \n group_by(dataset, publishable_subset) %>%\n dplyr::mutate(point_shape = \n ifelse(stringr::str_detect(term, \"overall\"), \n \"diamond\", \n \"circle\")) \n\n# publishable_subsets_forest_data <- \n# publishable_subsets_forest_data %>% \n# rename(id_col = term) %>% \n# group_by(type) %>% \n# group_split() %>% \n# set_names(., publishable_subsets_forest_data$type %>% unique) %>% \n# # map_if(.x = ., names(.) == \"study\",\n# # .f = ~ anonymise_teams(.x, TeamIdentifier_lookup)) %>% \n# bind_rows() %>% \n# rename(term = id_col)\n\nlibrary(tidytext)\n\ntidy_overall_labeller <- . 
%>% \n str_split(\"_\") %>% \n flatten_chr() %>% \n pluck(1)\n\ntidy_forest_labels <- Vectorize(tidy_overall_labeller)\n\npublishable_subsets_forest_data %>% \n group_by(dataset, publishable_subset) %>% \n mutate(term = case_when(term == \"overall\" ~ \n paste(term, \n dataset, \n publishable_subset,\n sep = \"_\"), \n TRUE ~ term),\n dataset = case_when(dataset == \"blue tit\" ~ \"Blue tit\",\n dataset == \"eucalyptus\" ~ \"Eucalyptus\",\n TRUE ~ NA)) %>% \n arrange(across(.cols = c(type, estimate)),\n .by_group = TRUE) %>% \n rowid_to_column() %>% \n mutate(term = reorder(term, rowid),\n publishable_subset = \n case_when(publishable_subset == \"All\" ~ \n \"All analyses\",\n publishable_subset == \"data_flawed\" ~ \n \"'Unpublishable'\\nremoved\",\n publishable_subset == \"data_flawed_major\" ~ \n \"'Unpublishable' &\\n'Major Revisions'\\nremoved\",\n TRUE ~ \"\")) %>% \n plot_forest() +\n scale_x_reordered(labels = tidy_forest_labels) +\n ggh4x::facet_nested(dataset ~ publishable_subset,\n independent = \"y\", \n scales = \"free\")\n```\n\n::: {.cell-output-display}\n![Forest plots of meta-analytic estimated standardized effect sizes ($Z_r$, blue circles) and their 95% confidence intervals for each effect size included in the meta-analysis model. The meta-analytic mean effect size is denoted by a black triangle and a dashed vertical line, with error bars also representing the 95% confidence interval. The solid black vertical line demarcates effect size of 0, indicating no relationship between the test variable and the response variable. The left side of each panel shows the analysis team names (anonymous arbitrary names assigned by us), each followed by three numbers. 
The first number is the submission ID (some analyst teams submitted results to us on >1 submission form), the second number is the analysis ID (some analyst teams included results of >1 analysis in a given submission), and the third number is the effect ID (some analysts submitted values for >1 effect per analysis). Thus, each row in each forest plot is uniquely identified, but it is possible to determine which effects come from which analyses and which analysis teams. The plots in the top row depict effects from analyses of blue tit data, and the bottom row plots depict effects from analyses of Eucalyptus data. The right-most plots depict all usable effect sizes. The plots on the left exclude effects from analysis sets that received at least one rating of “unpublishable” from peer reviewers, and the plots in the middle exclude effects from analysis sets that received at least one rating of either “unpublishable” or “major revision” from peer reviewers.](SM2_EffectSizeAnalysis_files/figure-html/fig-all-forest-plots-Zr-1.png){#fig-all-forest-plots-Zr width=960}\n:::\n:::\n\n\n\n\n#### Post-hoc analysis: Exploring the effect of excluding estimates in which we had reduced confidence\n\nFor each dataset (blue tit, Eucalyptus), we created a second, more conservative version, that excluded effects based on estimates of $\\mathit{df}$ that we considered less reliable (@tbl-Zr-exclusion-subsetting). We compared the outcomes of analyses of the primary dataset (constituted according to our registered plan) with the outcomes of analyses of the more conservative version of the dataset. We also compared results from analyses of both of these versions of the dataset to versions with our post-hoc removal of outliers described in the main text. 
Our more conservative exclusions (based on unreliable estimates of $\\mathit{df}$ ) had minimal impact on the meta-analytic mean for both blue tit and Eucalyptus analyses, regardless of whether outliers were excluded (@tbl-Zr-exclusion-subsetting).\n\n\n\n\n::: {#tbl-Zr-exclusion-subsetting .cell tbl-cap='Estimated meta-analytic mean, standard error, and 95% confidence intervals, from analyses of the primary data set, the more conservative version of the dataset which excluded effects based on less reliable estimates of $\\mathit{df}$, and both of these datasets with outliers removed.'}\n\n```{.r .cell-code}\nManyEcoEvo_viz %>% \n dplyr::filter(estimate_type == \"Zr\", \n model_name == \"MA_mod\", \n collinearity_subset != \"collinearity_removed\",\n publishable_subset == \"All\", \n expertise_subset == \"All\") %>% \n select(dataset, exclusion_set, tidy_mod_summary) %>% \n unnest(tidy_mod_summary) %>% \n filter(type == \"summary\") %>% \n select(-term, -type) %>% \n mutate(exclusion_set = \n case_when(exclusion_set == \"complete\" ~ \n \"Primary dataset\",\n exclusion_set == \"complete-rm_outliers\" ~ \n \"Primary dataset, outliers removed\",\n exclusion_set == \"partial\" ~ \n \"Conservative exclusions\",\n TRUE ~ \"Conservative exclusions, outliers removed\")) %>% \ngroup_by(exclusion_set) %>% \n gt::gt() %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::fmt(columns = \"p.value\",\n fns = function(x) gtsummary::style_pvalue(x, prepend_p = FALSE)) %>% \n gt::fmt_number(columns = c(-p.value, -dataset)) %>% \n gt::cols_label(estimate = gt::md(\"$$\\\\hat\\\\mu$$\"), \n std.error = gt::md(\"$$\\text{SE}[\\\\hat\\\\mu]$$\"),\n conf.low = gt::md(\"95\\\\%CI\")) %>% \n gt::cols_merge(columns = starts_with(\"conf\"), \n pattern = \"[{1},{2}]\") %>% \n gt::cols_move(columns = conf.low, after = std.error) %>% \n gt::tab_style(\n style = list(gt::cell_text(transform = \"capitalize\"), \n gt::cell_text(style = \"italic\")),\n locations = gt::cells_body(columns 
= \"dataset\", rows = dataset == \"eucalyptus\")\n ) \n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n
dataset

$$\\hat\\mu$$

\n

$$\text{SE}[\\hat\\mu]$$

\n

95%CI

\n
statisticp.value
Primary dataset
blue tit−0.350.03[−0.41,−0.29]−11.02<0.001
eucalyptus−0.090.06[−0.22,0.03]−1.470.14
Conservative exclusions
blue tit−0.360.03[−0.42,−0.29]−10.77<0.001
eucalyptus−0.110.07[−0.24,0.03]−1.550.12
Primary dataset, outliers removed
blue tit−0.360.03[−0.42,−0.30]−11.48<0.001
eucalyptus−0.030.01[−0.06,0.00]−2.230.026
Conservative exclusions, outliers removed
blue tit−0.360.03[−0.43,−0.30]−11.38<0.001
eucalyptus−0.040.02[−0.07,−0.01]−2.520.012
\n
\n```\n\n:::\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nplot_forest <- function(data, intercept = TRUE, MA_mean = TRUE ){\n if (MA_mean == FALSE) {\n data <- filter(data, term != \"Overall\")\n }\n \n p <- ggplot(data, aes(y = term, \n x = estimate, \n ymin = conf.low, \n ymax = conf.high,\n # shape = point_shape,\n colour = parameter_type)) +\n geom_pointrange() +\n ggforestplot::theme_forest() +\n theme(axis.line = element_line(size = 0.10, colour = \"black\"),\n axis.line.y = element_blank(),\n text = element_text(family = \"Helvetica\"),\n axis.text.y = element_blank()) +\n guides(shape = \"none\", colour = \"none\") +\n coord_flip() +\n labs(y = \"Standardised Effect Size, Zr\",\n x = element_blank()) +\n scale_x_continuous(breaks = c(-4,-3,-2,-1,0,1),\n minor_breaks = seq(from = -4.5, to = 1.5, by = 0.5)) +\n NatParksPalettes::scale_color_natparks_d(\"Glacier\")\n \n if (intercept == TRUE) {\n p <- p + geom_hline(yintercept = 0)\n }\n if (MA_mean == TRUE) {\n p <- p + geom_hline(aes(yintercept = meta_analytic_mean), \n data = data,\n colour = \"#01353D\", \n linetype = \"dashed\")\n }\n \n return(p)\n}\n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\ncomplete_euc_data <- \n ManyEcoEvo_viz %>% \n filter(exclusion_set == \"complete\", \n estimate_type == \"Zr\", \n model_name == \"MA_mod\",\n dataset == \"eucalyptus\",\n publishable_subset == \"All\",\n expertise_subset == \"All\") %>% \n select(tidy_mod_summary) %>% \n mutate(plot_data = map(tidy_mod_summary, \n .f = ~ dplyr::mutate(.x, \n point_shape = \n ifelse(stringr::str_detect(term, \"overall\"), \n \"diamond\", \n \"circle\"),\n Parameter = \n forcats::fct_reorder(term, \n estimate) %>% \n forcats::fct_reorder(., \n point_shape,\n .desc = TRUE))\n ),\n meta_analytic_mean = map_dbl(plot_data, \n ~ filter(.x, Parameter == \"overall\") %>% \n pull(estimate))) %>% \n select(plot_data, meta_analytic_mean) %>% \n unnest(cols = c(\"plot_data\")) %>% \n mutate(parameter_type = case_when(str_detect(Parameter, 
\"overall\") ~ \"mean\",\n TRUE ~ \"study\"))\n\n # ManyEcoEvo_viz %>% \n # filter(exclusion_set == \"complete\", \n # estimate_type == \"Zr\", \n # model_name == \"MA_mod\",\n # dataset == \"eucalyptus\",\n # publishable_subset == \"All\",\n # expertise_subset == \"All\") %>% \n # )\n\nmin_outlier_euc <- complete_euc_data %>% \n filter(type == \"study\") %>% \n slice_min(estimate, n = 3) %>% \n pull(term)\n\nsample_size_euc_Zr <- ManyEcoEvo_results %>% \n filter(exclusion_set == \"complete\", dataset == \"eucalyptus\") %>% \n pluck(\"data\", 1) %>% \n select(id_col, sample_size) %>% \n rename(term = id_col) %>% \n mutate(sample_size = as.numeric(sample_size))\n\nmean_n_euc_Zr <- sample_size_euc_Zr %>% \n drop_na(sample_size) %>% \n pull(sample_size) %>% \n mean() %>% \n round(2)\n\nN_outliers_Zr_euc <- sample_size_euc_Zr %>% \n filter(term %in% min_outlier_euc) %>% \n arrange(desc(sample_size))\n```\n:::\n\n\n\n\n#### Post-hoc analysis: Exploring the effect of including only analyses conducted by analysis teams with at least one member self-rated as \"highly proficient\" or \"expert\" in conducting statitistical analyses in their research area\n\nThe anonymous Team Identifiers in the reduced subset of \"expert\" or \"highly proficient\" analysts are exported internally in the `ManyEcoEvo` package as `ManyEcoEvo:::expert_subset`. 
Analyses from the following teams are retained in the reduced subset: _Bell_, _Berr_, _Brim_, _Bruc_, _Burr_, _Byng_, _Cape_, _Clar_, _Clev_, _Alban_, _Alpha_, _Bargo_, _Berry_, _Bowen_, _Bulli_, _Aldgat_, _Alding_, _Anakie_, _Aramac_, _August_, _Bamaga_, _Barham_, _Barmah_, _Batlow_, _Beltan_, _Bethan_, _Beulah_, _Bindoo_, _Boonah_, _Bowral_, _Bright_, _Buchan_, _Burnie_, _Cairns_, _Casino_, _Cattai_, _Adelong_, _Angasto_, _Antwerp_, _Arltung_, _Ashford_, _Babinda_, _Bargara_, _Barooga_, _Barraba_, _Belmont_, _Bemboka_, _Benalla_, _Bendigo_, _Berrima_, _Berwick_, _Beverle_, _Bicheno_, _Biloela_, _Birchip_, _Bombala_, _Bonalbo_, _Brookto_, _Bruthen_, _Buderim_, _Candelo_, _Capella_, _Carcoar_, _Carnama_, _Chewton_, _Anglesea_, _Ardrossa_, _Armidale_, _Atherton_, _Balaklav_, _Ballarat_, _Barellan_, _Belgrave_, _Berrigan_, _Binalong_, _Binnaway_, _Blackall_, _Boggabri_, _Bridport_, _Brooklyn_, _Buckland_, _Bundeena_, _Bungonia_, _Busselto_, _Calliope_, _Cardwell_, _Cassilis_, _Cessnock_, _Charlton_.\n\n\n\n\n::: {#fig-forest-plot-expertise .cell}\n\n```{.r .cell-code}\nplot_forest <- function(data, intercept = TRUE, MA_mean = TRUE){\n if (MA_mean == FALSE){\n data <- filter(data, Parameter != \"overall\")\n }\n \n p <- ggplot(data, aes(y = estimate, \n x = term, \n ymin = conf.low, \n ymax = conf.high,\n shape = parameter_type,\n colour = parameter_type)) +\n geom_pointrange(fatten = 2) +\n ggforestplot::theme_forest() +\n theme(axis.line = element_line(linewidth = 0.10, colour = \"black\"),\n axis.line.y = element_blank(),\n text = element_text(family = \"Helvetica\")#,\n # axis.text.y = element_blank()\n ) +\n guides(shape = guide_legend(title = NULL), \n colour = guide_legend(title = NULL)) +\n coord_flip() +\n ylab(bquote(Standardised~Effect~Size~Z[r])) +\n xlab(element_blank()) +\n # scale_y_continuous(breaks = c(-4,-3,-2,-1,0,1),\n # minor_breaks = seq(from = -4.5, to = 1.5, by = 0.5)) +\n NatParksPalettes::scale_color_natparks_d(\"Glacier\")\n \n if(intercept 
== TRUE){\n p <- p + geom_hline(yintercept = 0)\n }\n if(MA_mean == TRUE){\n p <- p + geom_hline(aes(yintercept = meta_analytic_mean), \n data = data,\n colour = \"#01353D\", \n linetype = \"dashed\")\n }\n \n return(p)\n}\n\nfilter_experts <- \n rlang::exprs(\n exclusion_set == \"complete\", \n estimate_type == \"Zr\", \n model_name == \"MA_mod\",\n publishable_subset == \"All\", \n expertise_subset == \"expert\")\n\nbt_experts_only <- \n ManyEcoEvo_viz %>% \n filter(!!!filter_experts, \n dataset == \"blue tit\") %>% \n select(tidy_mod_summary) %>% \n mutate(plot_data = map(tidy_mod_summary, \n .f = ~ dplyr::mutate(.x, \n point_shape = \n ifelse(stringr::str_detect(term, \"overall\"), \n \"diamond\", \n \"circle\"),\n Parameter = \n forcats::fct_reorder(term, \n estimate) %>% \n forcats::fct_reorder(., \n point_shape,\n .desc = TRUE))\n ),\n meta_analytic_mean = map_dbl(plot_data, \n ~ filter(.x, Parameter == \"overall\") %>% \n pull(estimate))) %>% \n select(plot_data, meta_analytic_mean) %>% \n unnest(cols = c(\"plot_data\")) %>% \n mutate(parameter_type = case_when(str_detect(Parameter, \"overall\") ~ \"mean\",\n TRUE ~ \"study\")) \n\n# bt_experts_only <- \n# bt_experts_only %>% \n# rename(id_col = term) %>% \n# group_by(type) %>% \n# group_split() %>% \n# set_names(., bt_experts_only$type %>% unique) %>% \n# # map_if(.x = ., names(.) 
== \"study\",\n# # .f = ~ anonymise_teams(.x, TeamIdentifier_lookup)) %>% \n# bind_rows() %>% \n# rename(term = id_col)\n\nbt_forest_experts <- bt_experts_only %>% \n arrange(desc(type)) %>% \n mutate(type = forcats::as_factor(type)) %>% \n group_by(type) %>% \n arrange(desc(estimate),.by_group = TRUE) %>% \n mutate(term = forcats::as_factor(term),\n point_shape = case_when(str_detect(type, \"summary\") ~ \"mean\",\n TRUE ~ \"study\")) %>% \n plot_forest(intercept = TRUE, MA_mean = TRUE) +\n theme(axis.text.x = element_text(size = 15), \n axis.title.x = element_text(size = 15),\n axis.text.y = element_blank()\n ) +\n scale_y_continuous(limits = c(-1.6, 0.65)) \n\neuc_experts_only <- \n ManyEcoEvo_viz %>% \n filter(!!!filter_experts, \n dataset == \"eucalyptus\") %>% \n select(model) %>% \n mutate(plot_data = map(model, \n .f = ~ broom::tidy(.x, \n conf.int = TRUE, \n include_studies = TRUE) %>% \n dplyr::mutate(point_shape = \n ifelse(stringr::str_detect(term, \"overall\"), \n \"diamond\", \n \"circle\"),\n Parameter = \n forcats::fct_reorder(term, \n estimate) %>% \n forcats::fct_reorder(., \n point_shape,\n .desc = TRUE))\n ),\n meta_analytic_mean = map_dbl(plot_data, \n ~ filter(.x, Parameter == \"overall\") %>% \n pull(estimate))) %>% \n select(plot_data, meta_analytic_mean) %>% \n unnest(cols = c(\"plot_data\")) %>% \n mutate(parameter_type = case_when(str_detect(Parameter, \"overall\") ~ \"mean\",\n TRUE ~ \"study\"))\n\n# euc_experts_only <- \n# euc_experts_only %>% \n# rename(id_col = term) %>% \n# group_by(type) %>% \n# group_split() %>% \n# set_names(., euc_experts_only$type %>% unique) %>% \n# # map_if(.x = ., names(.) 
== \"study\",\n# # .f = ~ anonymise_teams(.x, TeamIdentifier_lookup)) %>% \n# bind_rows() %>% \n# rename(term = id_col)\n\neuc_forest_experts <- euc_experts_only %>% \n arrange(desc(type)) %>% \n mutate(type = forcats::as_factor(type)) %>% \n group_by(type) %>% \n arrange(desc(estimate),.by_group = TRUE) %>% \n mutate(term = forcats::as_factor(term),\n point_shape = case_when(str_detect(type, \"summary\") ~ \"mean\",\n TRUE ~ \"study\")) %>% \n plot_forest(intercept = TRUE, MA_mean = TRUE) +\n theme(axis.text.x = element_text(size = 15), \n axis.title.x = element_text(size = 15),\n axis.text.y = element_blank()\n ) +\n scale_y_continuous(limits = c(-5, 1), \n breaks = c(-5, -4, -3, -2, -1, 0, 1) )\n\n# ---- Extract Viz ----\n\nbt_forest_experts\n\neuc_forest_experts\n```\n\n::: {.cell-output-display}\n![Blue tit dataset analyses](SM2_EffectSizeAnalysis_files/figure-html/fig-forest-plot-expertise-1.png){#fig-forest-plot-expertise-1 width=672}\n:::\n\n::: {.cell-output-display}\n![*Eucalyptus* dataset analyses](SM2_EffectSizeAnalysis_files/figure-html/fig-forest-plot-expertise-2.png){#fig-forest-plot-expertise-2 width=672}\n:::\n\nEstimated meta-analytic mean effect size ($Z_r$), standard error, and 95% confidence intervals, from analyses of the primary data set with at least one member self-rated as \"highly proficient\" or \"expert\" in conducting statistical analyses in their research area.\n:::\n\n\n\n\n#### Post-hoc analysis: Exploring the effect of excluding analyses of the blue tit dataset containing highly collinear predictor variables\n\nFor the blue tit dataset, we created a subset of analyses that excluded effects based on analyses containing highly correlated predictor variables. Excluded analyses are exported internally in the `ManyEcoEvo` package as `ManyEcoEvo::collinearity_subset`. 
Analyses with the following identifiers are excluded in the reduced subset: _Armadal-1-1-1_, _Babinda-1-1-1_, _Babinda-2-2-1_, _Barham-1-1-1_, _Barham-2-2-1_, _Bega-1-1-1_, _Bega-1-1-2_, _Bega-2-2-1_, _Bega-2-2-2_, _Borde-1-1-1_, _Bruc-1-1-1_, _Caigun-1-1-1_, _Caigun-2-2-1_, _Adelong-1-1-1_, _Adelong-2-2-1_.\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfilter_collinear <- rlang::exprs(exclusion_set == \"complete\", \n publishable_subset == \"All\", \n expertise_subset == \"All\", \n collinearity_subset == \"collinearity_removed\",\n model_name == \"MA_mod\",\n dataset == \"blue tit\")\n\n# summary_output_params <- rlang::exprs(tidy_mod_summary, MA_fit_stats, mod_fit_stats)\n\nManyEcoEvo_viz %>% \n filter(!!!filter_collinear) %>% \n mutate(plot_data = map(tidy_mod_summary, \n .f = ~ dplyr::mutate(.x, \n point_shape = \n ifelse(stringr::str_detect(term, \"overall\"), \n \"diamond\", \n \"circle\"),\n Parameter = \n forcats::fct_reorder(term, \n estimate) %>% \n forcats::fct_reorder(., \n point_shape,\n .desc = TRUE))\n ),\n meta_analytic_mean = map_dbl(plot_data, \n ~ filter(.x, Parameter == \"overall\") %>% \n pull(estimate))) %>% \n select(plot_data, meta_analytic_mean) %>% \n unnest(cols = c(\"plot_data\")) %>% \n mutate(parameter_type = case_when(str_detect(Parameter, \"overall\") ~ \"mean\",\n TRUE ~ \"study\")) %>% \n arrange(desc(type)) %>% \n mutate(type = forcats::as_factor(type)) %>% \n group_by(type) %>% \n arrange(desc(estimate),.by_group = TRUE) %>% \n mutate(term = forcats::as_factor(term),\n point_shape = case_when(str_detect(type, \"summary\") ~ \"mean\",\n TRUE ~ \"study\")) %>% \n plot_forest(intercept = TRUE, MA_mean = TRUE) +\n theme(axis.text.x = element_text(size = 15), \n axis.title.x = element_text(size = 15),\n axis.text.y = element_blank()\n ) +\n scale_y_continuous(limits = c(-1.5, 0.5), \n breaks = c(-1.5, -1, -0.5, 0, 0.5) )\n```\n\n::: {.cell-output-display}\n![Forest plot of meta-analytic estimated effect-sizes $Z_{r}$, standard error 
and 95% confidence intervals of blue tit analyses with highly collinear analyses removed. The meta-analytic mean for the reduced subset is denoted by the black triangle, and a dashed vertical line, with error bars representing the 95% confidence interval. The solid black vertical line demarcates effect size of 0.](SM2_EffectSizeAnalysis_files/figure-html/fig-forest-plot-Zr-collinear-rm-subset-1.png){#fig-forest-plot-Zr-collinear-rm-subset width=672}\n:::\n:::\n\n\n\n\n### Out of sample predictions $y_i$\n\n#### Excluded analyses with constructed variables {#sec-excluded-yi}\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nby <- join_by(response_variable_name) # don't join on id_col: inc. other excl.\n\n# Analyst Constructed Variables\nall_constructed_vars <- \n ManyEcoEvo %>% \n pull(data, dataset) %>% \n list_rbind(names_to = \"dataset\") %>% \n filter(str_detect(response_variable_type, \"constructed\")) %>% \n distinct(dataset,response_variable_name) %>% \n drop_na() %>% \n arrange()\n\n# Constructed Variables Included in the ManyAnalysts meta-analysis\n# (i.e. 
we have included them in the parameter tables)\nManyEcoEvo_yi_constructed_vars <-\n ManyEcoEvo:::analysis_data_param_tables %>% \n distinct(variable, dataset) %>% \n rename(response_variable_name = variable) %>% \n semi_join(all_constructed_vars, by) %>% \n filter(!str_detect(response_variable_name, \n \"average.proportion.of\")) # was excluded\n\nyi_constructed <-\n ManyEcoEvo_yi_results %>% \n pull(data, dataset) %>% \n list_rbind(names_to = \"dataset\") %>% \n filter(str_detect(response_variable_type, \"constructed\")) %>% \n distinct(dataset, id_col, TeamIdentifier, response_variable_name) %>% \n drop_na() \n\nexcluded_yi_constructed <- \n ManyEcoEvo %>% \n pull(data, dataset) %>% \n list_rbind(names_to = \"dataset\") %>% \n filter(str_detect(response_variable_type, \"constructed\"),\n str_detect(exclusions_all, \"retain\")) %>% \n distinct(dataset, id_col, TeamIdentifier, response_variable_name) %>% \n drop_na() %>% \n anti_join(yi_constructed, by) #rm response vars in yi_constructed\n\nn_dropped_analyses <- \n excluded_yi_constructed %>% \n n_distinct(\"id_col\")\n\nn_teams_w_dropped_analyses <- \n excluded_yi_constructed %>% \n group_by(TeamIdentifier) %>% \n count() %>% \n n_distinct(\"TeamIdentifier\")\n```\n:::\n\n\n\n\nWe standardized the $y_i$ estimates and their standard errors for the blue tit analyses using the population mean and standard deviations of the corresponding dependent variable for that analysis, as shown in @eq-Z-VZ, using the function `ManyEcoEvo::Z_VZ_preds()`. Note that this is NOT the same process as standardizing the effect sizes $Z_r$. 
We used the mean and standard deviation of the relevant raw datasets as our 'population' parameters.\n\n$$\nZ_j = \\frac{\\mu_i-\\bar{x}_j}{\\text{SD}_j} \\\\\n\\\\\n{\\text{VAR}}_{Z_j} = \\frac{{\\text{SE}}_{\\mu_i}}{{\\text{SD}_j}} \\\\\n$$ {#eq-Z-VZ}\n\nWhere $\\mu$ is the population parameter taken from our original dataset for variable $i$, and $\\bar{x}_j$ and $\\text{SD}_j$ are the out of sample point estimate values supplied for analysis $j$. $\\text{SE}_{{\\mu}_{i}}$ is the standard error of the population mean for variable $i$, while ${\\text{VAR}}_{{Z}_{j}}$ and ${Z}_{j}$ are the standardized variance and mean estimate for analysis $j$. Note that for the response variables that were scaled-and-centered, or else mean-centred before model fitting, we do not need to standardise because these are already on the Z-scale. In doing so we make the assumption that analysts' data subsetting will have little effect on the outcomes. For some analyses of the blue tit dataset, analysts constructed their own unique response variables, which meant we needed to reconstruct these variables in order to calculate the population parameters. Unfortunately we were not able to re-construct all variables used by the analysts, as we were unable to reproduce the data required for their re-construction, e.g. we were unable to reproduce principal component analyses or fitted models for extracting residuals [@tbl-constructed-var-exclusions]. 
A total of 15 were excluded from out-of-sample meta-analysis, from 10 teams, including the following analysis identifiers: _Bruc-1-1-1_, _Clar-2-2-1_, _Clar-1-1-1_, _Batlow-1-1-1_, _Batlow-1-1-2_, _Bindoo-1-1-1_, _Bourke-1-1-1_, _Buchan-1-1-1_, _Arltung-4-4-1_, _Bargara-1-1-1_, _Bendigo-5-5-1_, _Booligal-3-3-1_, _Booligal-2-2-1_, _Booligal-4-4-1_ and _Booligal-5-5-1_.\n\n\n\n\n::: {#tbl-constructed-var-exclusions .cell tbl-cap='Analyst-constructed variables and their inclusion in meta-analyses of out-of-sample predictions, $y\\_i$.'}\n\n```{.r .cell-code}\nall_constructed_vars %>% \n semi_join(ManyEcoEvo_yi_constructed_vars, by) %>% \n mutate(included_in_yi = TRUE) %>% \n bind_rows(\n {\n all_constructed_vars %>% \n anti_join(ManyEcoEvo_yi_constructed_vars, by) %>% \n mutate(included_in_yi = FALSE)\n }\n ) %>% \n dplyr::mutate(included_in_yi = \n case_match(included_in_yi, \n TRUE ~ \"check\", \n FALSE ~ \"xmark\" ),\n response_variable_name = \n gluedown::md_code(response_variable_name)) %>% \n group_by(dataset) %>% \n gt::gt() %>% \n gt::cols_label(response_variable_name = \"Constructed Variable\",\n included_in_yi = gt::md(\"Variable reconstructed for meta-analysis?\")) %>%\n gt::fmt_icon(included_in_yi) %>% \n gt::tab_style(style = cell_text(style = \"italic\", transform = \"capitalize\"), \n locations = cells_row_groups(groups = \"eucalyptus\")) %>%\n gt::tab_style(style = cell_text(align = \"center\"), \n locations = cells_body(columns = included_in_yi)) %>% \n gt::tab_style(style = cell_text(align = \"left\"), \n locations = cells_body(columns = response_variable_name)) %>% \n gt::tab_style(style = cell_text(align = \"left\"), \n locations = cells_column_labels(response_variable_name)) %>% \n gt::tab_style(locations = cells_body(columns = response_variable_name), \n style = cell_text(size = \"small\")) %>% \n gt::fmt_markdown(columns = response_variable_name) %>% \n gt::opt_stylize(style = 6, color = \"gray\", add_row_striping = TRUE) %>% \n 
gt::opt_row_striping(row_striping = TRUE) \n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n \n \n \n\n \n\n \n\n \n\n \n\n \n \n \n
Constructed Variable

Variable reconstructed for meta-analysis?

\n
blue tit

day_14_weight/day_14_tarsus_length

\n
Check

day_14_weight/(day_14_tarsus_length^2)

\n
Check

SMI

\n
Xmark

day_14_tarsus_length_group_deviation

\n
Xmark

day_14_weight_group_deviation

\n
Xmark

PC1.day_14_weight.day_14_tarsus_length

\n
Xmark

day_14_tarsus_length_deviation

\n
Xmark

residual_day14_weight

\n
Xmark

residual_day_14_weight_males

\n
Xmark
eucalyptus

euc_sdlgs0_2m

\n
Check

euc_sdlgs_all

\n
Check

euc_sdlgs>50cm

\n
Check

small*0.25+medium*1.25+large*2.5

\n
Check

average.proportion.of.plots.containing.at.least.one.euc.seedling.of.any.size

\n
Xmark
\n
\n```\n\n:::\n:::\n\n\n\n\n#### Non-truncated $y_{i}$ meta-analysis forest plot\n\nBelow is the non-truncated version of @fig-euc-yi-forest-plot showing a forest plot of the out-of-sample predictions, $y_{i}$, on the response-scale (stem counts), for *Eucalyptus* analyses, showing the full error bars of all model estimates.\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nplot_forest_2 <- function(data, intercept = TRUE, MA_mean = TRUE, y_zoom = numeric(2L)){\n if(MA_mean == FALSE){\n data <- filter(data, study_id != \"overall\")\n }\n \n plot_data <- data %>% \n group_by(study_id) %>% \n group_nest() %>% \n hoist(data, \"estimate\",.remove = FALSE) %>% \n hoist(estimate, y50 = 2) %>% \n select(-estimate) %>% \n unnest(data) %>% \n arrange(desc(type)) %>% \n mutate(type = forcats::as_factor(type)) %>% \n group_by(type) %>% \n arrange(desc(y50),.by_group = TRUE) %>% \n mutate(study_id = forcats::as_factor(study_id),\n point_shape = case_when(str_detect(type, \"summary\") ~ \"diamond\",\n TRUE ~ \"circle\"))\n \n p <- ggplot(plot_data, aes(y = estimate, \n x = study_id,\n ymin = conf.low, \n ymax = conf.high,\n # shape = type,\n shape = point_shape,\n colour = estimate_type\n )) +\n geom_pointrange(position = position_dodge(width = 0.5)) +\n ggforestplot::theme_forest() +\n theme(axis.line = element_line(linewidth = 0.10, colour = \"black\"),\n axis.line.y = element_blank(),\n text = element_text(family = \"Helvetica\")) +\n guides(shape = \"none\", colour = \"none\") +\n coord_flip(ylim = y_zoom) +\n labs(y = \"Model estimated out of sample predictions, stem counts\",\n x = element_blank()) +\n scale_y_continuous(breaks = scales::breaks_extended(10)) +\n NatParksPalettes::scale_color_natparks_d(\"Glacier\") \n \n if(intercept == TRUE){\n p <- p + geom_hline(yintercept = 0)\n }\n \n if(MA_mean == TRUE){\n p <- p +\n geom_hline(aes(yintercept = plot_data %>%\n filter(type == \"summary\", estimate_type == \"y25\") %>%\n pluck(\"estimate\")),\n data = data,\n colour = 
\"#01353D\",\n linetype = \"dashed\") +\n geom_hline(aes(yintercept = plot_data %>%\n filter(type == \"summary\", estimate_type == \"y50\") %>%\n pluck(\"estimate\")),\n data = data,\n colour = \"#088096\",\n linetype = \"dashed\") +\n geom_hline(aes(yintercept = plot_data %>%\n filter(type == \"summary\", estimate_type == \"y75\") %>%\n pluck(\"estimate\")),\n data = data,\n colour = \"#58B3C7\" ,\n linetype = \"dashed\")\n }\n \n print(p)\n}\n\n# ---- new code ----\n\neucalyptus_yi_plot_data <- \n ManyEcoEvo_yi_viz %>% \n filter(dataset == \"eucalyptus\", model_name == \"MA_mod\") %>% \n unnest(cols = tidy_mod_summary) %>% \n mutate(response_scale = list(log_back(estimate, std.error, 1000)), \n .by = c(dataset, estimate_type, term, type), \n .keep = \"used\") %>% \n select(-estimate, -std.error) %>% \n unnest_wider(response_scale) %>% \n rename(estimate = mean_origin, conf.low = lower, conf.high = upper) %>% \n nest(tidy_mod_summary = c(-dataset, -estimate_type)) %>% #extract euc data for plotting (on count scale, not log scale)\n select(dataset, estimate_type, tidy_mod_summary) %>% \n unnest(cols = tidy_mod_summary) %>% \n rename(study_id = term) %>% \n ungroup()\n\nmax_x_axis <- \n eucalyptus_yi_plot_data %>% \n pluck(\"conf.high\", max) %>% \n round() + 10\n\neucalyptus_yi_plot_data %>% \n plot_forest_2(MA_mean = T, y_zoom = c(0, max_x_axis)) +\n theme(axis.text.y = element_blank())\n```\n\n::: {.cell-output-display}\n![Forest plot of meta-analytic estimated out of sample predictions, $y_{i}$, on the response-scale (stem counts) for *Eucalyptus* analyses. Circles represent individual analysis estimates. Triangles represent the meta-analytic mean for each prediction scenario. Navy blue coloured points correspond to $y_{25}$ scenario, blue coloured points correspond to the $y_{50}$ scenario, while light blue points correspond to the $y_{75}$ scenario. Error bars are 95% confidence intervals. Outliers (i.e. 
observations with mean estimates more than 3SD above the population parameter mean, see @sec-excluded-yi) have been removed prior to model fitting.](SM2_EffectSizeAnalysis_files/figure-html/fig-euc-yi-forest-plot-full-1.png){#fig-euc-yi-forest-plot-full width=672}\n:::\n:::\n", "supporting": [ "SM2_EffectSizeAnalysis_files/figure-html" ], diff --git a/_freeze/supp_mat/SM2_EffectSizeAnalysis/figure-html/fig-euc-yi-forest-plot-full-1.png b/_freeze/supp_mat/SM2_EffectSizeAnalysis/figure-html/fig-euc-yi-forest-plot-full-1.png index 325c41b..4d94b7a 100644 Binary files a/_freeze/supp_mat/SM2_EffectSizeAnalysis/figure-html/fig-euc-yi-forest-plot-full-1.png and b/_freeze/supp_mat/SM2_EffectSizeAnalysis/figure-html/fig-euc-yi-forest-plot-full-1.png differ diff --git a/_freeze/supp_mat/SM2_EffectSizeAnalysis/figure-html/fig-forest-plot-bt-yi-SM-1.png b/_freeze/supp_mat/SM2_EffectSizeAnalysis/figure-html/fig-forest-plot-bt-yi-SM-1.png deleted file mode 100644 index c333e80..0000000 Binary files a/_freeze/supp_mat/SM2_EffectSizeAnalysis/figure-html/fig-forest-plot-bt-yi-SM-1.png and /dev/null differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/execute-results/html.json b/_freeze/supp_mat/SM3_ExplainingDeviation/execute-results/html.json new file mode 100644 index 0000000..4e18bf1 --- /dev/null +++ b/_freeze/supp_mat/SM3_ExplainingDeviation/execute-results/html.json @@ -0,0 +1,15 @@ +{ + "hash": "cc2781be2fe554df18315c390df2e45c", + "result": { + "engine": "knitr", + "markdown": "---\ntitle: \"Explaining Variation in Deviation Scores\"\nformat: \n html:\n code-fold: true\n echo: true\nnumber-sections: true\nexecute:\n freeze: auto # re-render only when source changes\npre-render: \"utils.R\"\nbibliography: \n - ../ms/references.bib\n - ../ms/grateful-refs.bib\n---\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(withr)\nlibrary(here)\nlibrary(tidyverse)\nlibrary(performance)\nlibrary(broom.mixed)\nlibrary(gt)\nlibrary(lme4)\nlibrary(parameters) #must be loaded 
directly else parameters fail\nlibrary(MuMIn)\nlibrary(ManyEcoEvo)\nlibrary(tidymodels)\nlibrary(multilevelmod)\nlibrary(rlang)\n\nset.seed(1234)\nsource(here::here(\"utils.R\"))\n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nplot_forest <- function(data, intercept = TRUE, MA_mean = TRUE){\n if (MA_mean == FALSE) {\n data <- filter(data, study_id != \"overall\")\n }\n \n data <- data %>% \n group_by(study_id) %>% \n group_nest() %>% \n hoist(data, \"estimate\",.remove = FALSE) %>% \n hoist(estimate, y50 = 2) %>% \n select(-estimate) %>% \n unnest(data) %>% \n arrange(y50) %>% \n mutate(point_shape = case_when(str_detect(type, \"summary\") ~ \"diamond\",\n TRUE ~ \"circle\"))\n \n p <- ggplot(data, aes(y = estimate, \n x = reorder(study_id, y50),\n ymin = conf.low, \n ymax = conf.high,\n shape = point_shape,\n colour = estimate_type\n )) +\n geom_pointrange(position = position_jitter(width = 0.1)) +\n ggforestplot::theme_forest() +\n theme(axis.line = element_line(linewidth = 0.10, colour = \"black\"),\n axis.line.y = element_blank(),\n text = element_text(family = \"Helvetica\")) +\n guides(shape = \"none\", colour = \"none\") +\n coord_flip() +\n labs(y = \"Standardised Out of Sample Predictions, Z\",\n x = element_blank()) +\n scale_y_continuous(breaks = seq(from = round(min(data$conf.low)), to = round(max(data$conf.high)), by = 1),\n minor_breaks = seq(from = -4.5, to = 1.5, by = 0.5)) +\n NatParksPalettes::scale_color_natparks_d(\"Glacier\") \n \n if (intercept == TRUE) {\n p <- p + geom_hline(yintercept = 0)\n }\n if (MA_mean == TRUE) {\n # p <- p + geom_hline(aes(yintercept = meta_analytic_mean), \n # data = data,\n # colour = \"#01353D\", \n # linetype = \"dashed\")\n }\n \n print(p)\n}\n\n\nplot_forest_2 <- function(data, intercept = TRUE, MA_mean = TRUE){\n if (MA_mean == FALSE) {\n data <- filter(data, study_id != \"overall\")\n }\n \n plot_data <- data %>% \n group_by(study_id) %>% \n group_nest() %>% \n hoist(data, \"estimate\",.remove = FALSE) %>% \n 
hoist(estimate, y50 = 2) %>% \n select(-estimate) %>% \n unnest(data) %>% \n arrange(y50)\n \n p <- ggplot(plot_data, aes(y = estimate, \n x = reorder(study_id, y50),\n ymin = conf.low, \n ymax = conf.high,\n # shape = type,\n colour = estimate_type\n )) +\n geom_pointrange(position = position_dodge(width = 0.5)) +\n ggforestplot::theme_forest() +\n theme(axis.line = element_line(linewidth = 0.10, colour = \"black\"),\n axis.line.y = element_blank(),\n text = element_text(family = \"Helvetica\")) +\n guides(shape = \"none\", colour = \"none\") +\n coord_flip() +\n labs(y = \"Model estimated out of sample predictions\",\n x = element_blank()) +\n scale_y_continuous(breaks = scales::breaks_extended(10)) +\n NatParksPalettes::scale_color_natparks_d(\"Glacier\") \n \n if (intercept == TRUE) {\n p <- p + geom_hline(yintercept = 0)\n }\n if (MA_mean == TRUE) {\n p <- p +\n geom_hline(aes(yintercept = plot_data %>%\n filter(type == \"summary\", estimate_type == \"y25\") %>%\n pluck(\"estimate\")),\n data = data,\n colour = \"#01353D\",\n linetype = \"dashed\") +\n geom_hline(aes(yintercept = plot_data %>%\n filter(type == \"summary\", estimate_type == \"y50\") %>%\n pluck(\"estimate\")),\n data = data,\n colour = \"#088096\",\n linetype = \"dashed\") +\n geom_hline(aes(yintercept = plot_data %>%\n filter(type == \"summary\", estimate_type == \"y75\") %>%\n pluck(\"estimate\")),\n data = data,\n colour = \"#58B3C7\" ,\n linetype = \"dashed\")\n }\n \n print(p)\n}\n\ncreate_model_workflow <- function(outcome, fixed_effects, random_intercepts){\n # https://community.rstudio.com/t/programmatically-generate-formulas-for-lmer/8575\n # \n # ---- roxygen example ----\n # test_dat <- ManyEcoEvo_results$effects_analysis[[1]] %>%\n # unnest(review_data) %>%\n # select(study_id,\n # starts_with(\"box_cox_abs_dev\"),\n # RateAnalysis,\n # PublishableAsIs,\n # ReviewerId,\n # box_cox_var)\n # \n # test_dat <- test_dat %>%\n # janitor::clean_names() %>%\n # mutate_if(is.character, 
factor) %>%\n # mutate(weight = importance_weights(1/test_dat$box_cox_var))\n # create_model_workflow(\"box_cox_abs_deviation_score_estimate\",\n # \"publishable_as_is\",\n # random_intercepts = c(\"study_id\")) %>%\n # fit(test_dat)\n \n # ---- Define random effects constructor function ----\n randomify <- function(feats) {\n paste0(\"(1|\", feats, \")\", collapse = \" + \")\n }\n \n # ---- Construct formula ----\n \n randomify <- function(feats) paste0(\"(1|\", feats, \")\", collapse = \" + \")\n fixed <- paste0(fixed_effects, collapse = \" + \")\n random <- randomify(random_intercepts)\n \n model_formula <- as.formula(paste(outcome, \"~\", fixed, \"+\", random))\n \n # ---- Construct Workflow ----\n model <- linear_reg() %>%\n set_engine(\"lmer\")\n \n workflow_formula <- workflow() %>%\n add_variables(outcomes = all_of(outcome),\n predictors = all_of(c(fixed_effects, random_intercepts))) %>%\n add_model(model, formula = model_formula) #%>% \n # add_case_weights(weight)\n \n return(workflow_formula)\n \n}\n\n# Define Plotting Function\nplot_model_means_box_cox_cat <- function(dat, \n variable, \n predictor_means, \n new_order, \n title, \n back_transform = FALSE) {\n dat <- mutate(dat, \n \"{{variable}}\" := # \n fct_relevel(.f = {{variable}}, \n new_order)\n )\n \n if (back_transform == TRUE) {\n dat <- dat %>% \n mutate(box_cox_abs_deviation_score_estimate = \n sae::bxcx(unique(dat$lambda),\n x = box_cox_abs_deviation_score_estimate, InverseQ = TRUE))\n \n predictor_means <- predictor_means %>% \n as_tibble() %>% \n mutate(lambda = dat$lambda %>% unique()) %>% \n mutate(across(.cols = -PublishableAsIs,\n ~ sae::bxcx(unique(dat$lambda),x = .x, InverseQ = TRUE)))\n }\n \n p <- ggplot(dat, aes(x = {{variable}},\n y = box_cox_abs_deviation_score_estimate)) +\n # Add base dat\n geom_violin(aes(fill = {{variable}}),\n trim = TRUE, \n # scale = \"count\", #TODO consider toggle on/off?\n colour = \"white\") +\n see::geom_jitter2(width = 0.05, alpha = 0.5) +\n # Add 
pointrange and line from means\n geom_line(dat = predictor_means, aes(y = Mean, group = 1), linewidth = 1) +\n geom_pointrange(\n dat = predictor_means,\n aes(y = Mean, ymin = CI_low, ymax = CI_high),\n linewidth = 1,\n color = \"white\",\n alpha = 0.5\n ) +\n # Improve colors\n see::scale_fill_material_d(discrete = TRUE, \n name = \"\",\n palette = \"ice\",\n labels = pull(dat, {{variable}}) %>% \n levels() %>% \n capwords(),\n reverse = TRUE) +\n EnvStats::stat_n_text() +\n see::theme_modern() +\n theme(axis.text.x = element_text(angle = 90))\n \n if (back_transform == TRUE) {\n p <- p + \n labs(x = \"Categorical Peer Review Rating\", \n y = \"Absolute Deviation from\\n Meta-Anaytic Mean Zr\") \n } else {\n p <- p + labs(x = \"Categorical Peer Review Rating\", \n y = \"Deviation from\\nMeta-Analytic Mean Effect Size\") \n }\n \n return(p)\n}\n\npossibly_check_convergence <- possibly(performance::check_convergence,\n otherwise = NA)\n\npossibly_check_singularity <- possibly(performance::check_singularity,\n otherwise = NA)\n\n# define plotting fun for walk plotting\nplot_continuous_rating <- function(plot_data){\n plot_data %>% \n plot_cont_rating_effects(response = \"box_cox_abs_deviation_score_estimate\", \n predictor = \"RateAnalysis\", \n back_transform = FALSE,\n plot = FALSE) %>% \n pluck(2) +\n ggpubr::theme_pubr() + \n ggplot2::xlab(\"Rating\") + \n ggplot2::ylab(\"Deviation In Effect Size from Analytic Mean\")\n}\n\n\nwalk_plot_effects_diversity <- function(model, plot_data, back_transform = FALSE){\n out_plot <- plot_effects_diversity(model, plot_data, back_transform) +\n ggpubr::theme_pubr()\n \n return(out_plot)\n}\n\nplot_model_means_RE <- function(data, variable, predictor_means) {\n p <- ggplot(data, aes(x = as.factor({{variable}}), \n y = box_cox_abs_deviation_score_estimate)) +\n # Add base data\n geom_violin(aes(fill = as.factor({{variable}})), color = \"white\") +\n see::geom_jitter2(width = 0.05, alpha = 0.5) +\n # Add pointrange and line from 
means\n geom_line(data = predictor_means, aes(y = Mean, group = 1), linewidth = 1) +\n geom_pointrange(\n data = predictor_means,\n aes(y = Mean, ymin = CI_low, ymax = CI_high),\n linewidth = 1,\n color = \"white\"\n ) +\n # Improve colors\n scale_x_discrete(labels = c(\"0\" = \"No Random Effects\", \"1\" = \"Random Effects\")) +\n see::scale_fill_material(palette = \"ice\",\n discrete = TRUE, \n labels = c(\"No Random Effects\", \"Random effects\"), \n name = \"\") +\n see::theme_modern() +\n EnvStats::stat_n_text() +\n labs(x = \"Random Effects Included\", \n y = \"Deviation from meta-analytic mean\")+ \n guides(fill = guide_legend(nrow = 2)) +\n theme(axis.text.x = element_text(angle = 90))\n return(p)\n}\n\nposs_fit <- possibly(fit, otherwise = NA, quiet = FALSE)\n\ncreate_model_formulas <- function(outcome, fixed_effects, random_intercepts){\n # https://community.rstudio.com/t/programmatically-generate-formulas-for-lmer/8575\n # ---- Define random effects constructor function ----\n randomify <- function(feats) {\n paste0(\"(1|\", feats, \")\", collapse = \" + \")\n }\n # ---- Construct formula ----\n randomify <- function(feats) paste0(\"(1|\", feats, \")\", collapse = \" + \")\n fixed <- paste0(fixed_effects, collapse = \" + \")\n random <- randomify(random_intercepts)\n model_formula <- as.formula(paste(outcome, \"~\", fixed, \"+\", random))\n return(model_formula)\n}\n```\n:::\n\n\n\n\n## Box-Cox transformation of response variable for model fitting\n\nTo aid in interpreting explanatory models where the response variable has been Box-Cox transformed, we plotted the transformation relationship for each of our analysis datasets (@fig-box-cox-transformations).\nNote that `timetk::step_box_cox()` directly optimises the estimation of the transformation parameter lambda, $\\lambda$, using the \"Guerrero\" method such that $\\lambda$ minimises the coefficient of variation for sub-series of a numeric vector [@timetk, see `?timetk::step_box_cox()` for further 
details].\nConsequently, each dataset has its own unique value of $\\lambda$, and therefore a unique transformation relationship.\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nprep_math_label_estimate_type <- function(estimate_string){\n paste0(substring(estimate_string, 1, 1), \n \"[\", substring(estimate_string, 2, 3), \"]\") \n}\n\nfilter_vars_main_analysis <- rlang::exprs(estimate_type == \"Zr\", \n exclusion_set == \"complete\", \n publishable_subset == \"All\", \n expertise_subset == \"All\",\n collinearity_subset == \"All\")\n\ntransformation_plot_data <- \n ManyEcoEvo::ManyEcoEvo_yi_results %>% \n bind_rows(ManyEcoEvo_results %>%\n filter(!!!filter_vars_main_analysis)) %>% \n select(dataset, estimate_type, effects_analysis) %>% \n hoist(effects_analysis, \"abs_deviation_score_estimate\",\n \"box_cox_abs_deviation_score_estimate\") %>% \n hoist(effects_analysis, \"lambda\", .simplify = TRUE, .transform = ~unique(.x)) %>% \n select(-effects_analysis) %>% \n unnest(cols = c(abs_deviation_score_estimate,\n box_cox_abs_deviation_score_estimate))\n\ntransformation_plot_data %>% \n mutate(estimate_type = forcats::as_factor(estimate_type),\n estimate_type = forcats::fct_relabel(estimate_type, prep_math_label_estimate_type),\n dataset = case_match(dataset, \n \"eucalyptus\" ~ \"Eucalyptus\",\n .default = dataset),\n dataset = dplyr::if_else(str_detect(dataset, \"blue\"),\n latex2exp::TeX(dataset, output = \"character\"),\n latex2exp::TeX(dataset, italic = TRUE, output = \"character\") )\n ) %>% \n ggplot(aes(y = abs_deviation_score_estimate, \n x = box_cox_abs_deviation_score_estimate)) + \n geom_point() +\n ggh4x::facet_grid2(c(\"dataset\", \"estimate_type\"), \n scales = \"free\", \n independent = \"all\",\n labeller = labeller(estimate_type = label_parsed, dataset = label_parsed)) +\n geom_label(aes(x = -Inf, y = Inf, \n label = latex2exp::TeX(paste(\"$\\\\lambda =$\", round(lambda, digits = 4)), output = \"character\"), \n hjust = -0.2, vjust = 2), \n size = 4, 
parse = TRUE) +\n theme_bw() +\n xlab(\"Box-Cox transformed absolute deviation score\") +\n ylab(\"Absolute deviation score\")\n```\n\n::: {.cell-output-display}\n![Box-Cox transformed absolute deviation scores plotted against (untransformed) absolute deviation scores.](SM3_ExplainingDeviation_files/figure-html/fig-box-cox-transformations-1.png){#fig-box-cox-transformations width=960}\n:::\n:::\n\n\n\n\n## Model Convergence and Singularity problems {#sec-convergence-singularity}\n\nDuring model fitting, especially during fitting of models with random effects using `lme4::` [@lme4], some models failed to converge while others were accompanied with console warnings of singular fit.\nHowever, the convergence checks from `lme4::` are known to be overly strict (see `?performance::check_convergence()` documentation for a discussion of this issue), consequently we checked for model warnings of convergence failure using the `performance::check_convergence()` function from the `performance::` package [@performance].\nFor all models we double-checked that they did not have singular fit by using `performance::check_singularity()`.\nDespite passing singularity checks with the `performance::` package, `parameters::parameters()` was unable to properly estimate $\\text{SE}$ and confidence intervals for the random effects of some models, which suggests singular fit.\nFor all models we also checked whether the $\\text{SE}$ of random effects estimates could be calculated, and if not, marked these models as being singular.\nAnalyses of singularity and convergence are presented throughout this document under the relevant section-heading for the analysis type and outcome, i.e. 
effect size ($Z_r$) or out-of-sample predictions ($y_i$).\n\n## Deviation Scores as explained by Reviewer Ratings\n\n### Effect Sizes $Z_r$ {#sec-Zr-deviation-ratings}\n\nModels of deviation explained by categorical peer ratings all had singular fit or failed to converge for both blue tit and *Eucalyptus* datasets when random effects were included for both the effect ID and the reviewer ID (@tbl-explore-Zr-deviation-random-effects-structure). For the *Eucalyptus* dataset, when a random effect was included for effect ID only, the model failed to converge. The same was true for the blue tit dataset. As for the effect-size analysis, we included a random-effect for Reviewer ID only when fitting models of deviation explained by categorical peer ratings (See @tbl-deviation-rating-estimates).\n\nFor models of deviation explained by continuous peer-review ratings, when including both random effects for effect ID and Reviewer ID model fits were singular for both datasets (@tbl-explore-Zr-deviation-random-effects-structure). The models passed the `performance::check_singularity()` check, however, the $\\text{SD}$ and CI could not be estimated by `parameters::model_parameters()` with a warning stating this was likely due to singular fit. For models with a random effect for effect ID, the same occurred for the blue tit dataset, whereas for the *Eucalyptus* dataset, the model did not converge at all. Consequently, for both blue tit and *Eucalyptus* datasets, we fitted and analysed models of deviation explained by continuous peer review ratings with a random effect for Reviewer ID only (See @tbl-deviation-rating-estimates).\n\n\n\n\n::: {#tbl-explore-Zr-deviation-random-effects-structure .cell tbl-cap='Singularity and convergence checking outcomes for models of deviation in effect-sizes $Z_r$ explained by peer-review ratings for different random effect structures. 
Problematic checking outcomes are highlighted in red.'}\n\n```{.r .cell-code}\nlibrary(multilevelmod)\n\nposs_extract_fit_engine <- purrr::possibly(extract_fit_engine, otherwise = NA)\nposs_parameters <- purrr::possibly(parameters::parameters, otherwise = NA)\n\nmodel <- linear_reg() %>%\n set_engine(\"lmer\", control = lmerControl(optimizer = \"nloptwrap\"))\n\nbase_wf <- workflow() %>%\n add_model(model)\n\nformula_study_id <- workflow() %>%\n add_variables(outcomes = box_cox_abs_deviation_score_estimate, \n predictors = c(publishable_as_is, study_id)) %>% \n add_model(model, formula = box_cox_abs_deviation_score_estimate ~ publishable_as_is + (1 | study_id ))\n\nformula_ReviewerId <- workflow() %>%\n add_variables(outcomes = box_cox_abs_deviation_score_estimate, \n predictors = c(publishable_as_is, reviewer_id)) %>% \n add_model(model, \n formula = box_cox_abs_deviation_score_estimate ~ publishable_as_is + (1 | reviewer_id ))\n\nformula_both <- workflow() %>%\n add_variables(outcomes = box_cox_abs_deviation_score_estimate, \n predictors = c(publishable_as_is, reviewer_id, study_id)) %>% \n add_model(model,\n formula = box_cox_abs_deviation_score_estimate ~ publishable_as_is + (1 | study_id) + (1 | reviewer_id))\n\n# ---- Create DF for combinatorial model specification ----\n\nmodel_vars <- \n bind_rows(\n tidyr::expand_grid(outcome = \"box_cox_abs_deviation_score_estimate\",\n fixed_effects = c(\"publishable_as_is\", \n \"rate_analysis\"),\n random_intercepts = c(\"study_id\", \n \"reviewer_id\")) %>% \n rowwise() %>% \n mutate(random_intercepts = as.list(random_intercepts)),\n tidyr::expand_grid(outcome = \"box_cox_abs_deviation_score_estimate\",\n fixed_effects = c(\"publishable_as_is\", \n \"rate_analysis\"),\n random_intercepts = c(\"study_id\", \n \"reviewer_id\")) %>% \n group_by(outcome, fixed_effects) %>% \n reframe(random_intercepts = list(random_intercepts))\n )\n\n# ----- Run all models for all combinations of dataset, exclusion_set, and 
publishable_subset ----\n# And Extract\nset.seed(1234)\nall_model_fits <- \n model_vars %>% \n cross_join(., \n {ManyEcoEvo::ManyEcoEvo_results %>% \n select(estimate_type, ends_with(\"set\"), effects_analysis) %>% \n dplyr::filter(expertise_subset == \"All\", \n collinearity_subset == \"All\") %>% \n select(-c(expertise_subset, collinearity_subset))}) %>% \n ungroup() %>% \n filter(publishable_subset == \"All\", \n exclusion_set == \"complete\") %>% \n select(-c(exclusion_set, publishable_subset)) %>%\n mutate(effects_analysis = \n map(effects_analysis, \n ~ .x %>% \n unnest(review_data) %>% \n select(any_of(c(\"id_col\", \"study_id\")),\n starts_with(\"box_cox_abs_dev\"), \n RateAnalysis, \n PublishableAsIs,\n ReviewerId,\n box_cox_var) %>% \n janitor::clean_names() %>%\n mutate_if(is.character, factor) \n ),\n model_workflows = pmap(.l = list(outcome, \n fixed_effects, \n random_intercepts), \n .f = create_model_workflow),\n fitted_mod_workflow = map2(model_workflows, effects_analysis, poss_fit), #NOT MEANT TO BE TEST DAT\n fitted_model = map(fitted_mod_workflow, extract_fit_engine),\n convergence = map_lgl(fitted_model, performance::check_convergence),\n singularity = map_lgl(fitted_model, performance::check_singularity),\n params = map(fitted_model, parameters::parameters)\n ) %>% \n unnest_wider(random_intercepts, names_sep = \"_\") %>% \n select(-outcome, \n -model_workflows, \n -fitted_mod_workflow,\n -effects_analysis,\n estimate_type) %>% \n replace_na(list(convergence = FALSE)) \n\n# If singularity == FALSE and convergence == TRUE, but the model appears here, then that's because\n# the SD and CI's couldn't be estimated by parameters::\n\nZr_singularity_convergence <- \n all_model_fits %>% \n left_join({all_model_fits %>% \n unnest(params) %>% \n filter(Effects == \"random\") %>% \n filter(if_any(contains(\"SE\"), list(is.infinite, is.na))) %>% \n distinct(fixed_effects, \n random_intercepts_1,\n random_intercepts_2, \n dataset, \n estimate_type,\n 
convergence, \n singularity) %>% \n mutate(SE_calc = FALSE)}) %>% \n left_join({all_model_fits %>% \n unnest(params) %>% \n filter(Effects == \"random\") %>% \n filter(if_any(contains(\"CI\"), list(is.infinite, is.na))) %>% \n distinct(fixed_effects, \n random_intercepts_1,\n random_intercepts_2, \n dataset, \n estimate_type,\n convergence, \n singularity) %>% \n mutate(CI_calc = FALSE)}) %>% \n rowwise() %>% \n mutate(across(ends_with(\"_calc\"), \n ~ replace_na(.x, TRUE))) %>% \n mutate(across(c(SE_calc, CI_calc, singularity), ~ ifelse(is_false(convergence), NA, .x))) \n\n# ----- new code showing ALL model fits not just bad fits\n\nZr_singularity_convergence %>% \n select(-fitted_model, -params, -estimate_type) %>% \n arrange(dataset,\n fixed_effects,\n random_intercepts_1,\n random_intercepts_2\n ) %>% \n mutate(across(starts_with(\"random\"), \n ~ str_replace_all(.x, \"_\", \" \") %>%\n Hmisc::capitalize() %>% \n str_replace(\"id\", \"ID\")),\n dataset = \n case_when(dataset == \"eucalyptus\" ~ Hmisc::capitalize(dataset), \n TRUE ~ dataset)) %>% \n group_by(dataset) %>% \n gt::gt() %>% \n gt::text_transform(\n locations = cells_body(\n columns = fixed_effects,\n rows = random_intercepts_1 != \"Reviewer ID\"\n ),\n fn = function(x){\n paste0(\"\")\n }\n ) %>% \n tab_style(\n style = list(\n cell_fill(color = scales::alpha(\"red\", 0.6)),\n cell_text(color = \"white\", weight = \"bold\")\n ),\n locations = list(\n cells_body(columns = \"singularity\", rows = singularity == TRUE),\n cells_body(columns = \"convergence\", rows = convergence == FALSE), #TODO why didn't work here??\n cells_body(columns = \"SE_calc\", rows = SE_calc == FALSE),\n cells_body(columns = \"CI_calc\", rows = CI_calc == FALSE)\n )\n ) %>% \n gt::text_transform(fn = function(x) ifelse(x == TRUE, \"yes\",\n ifelse(x == FALSE, \"no\", x)),\n locations = cells_body(columns = c(\"singularity\", \"convergence\", \"SE_calc\", \"CI_calc\"))) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n 
gt::cols_label(dataset = \"Dataset\",\n fixed_effects = \"Fixed Effect\",\n singularity = \"Singular Fit?\",\n convergence = \"Model converged?\",\n SE_calc = gt::md(\"Can random effects $\\\\text{SE}$ be calculated?\"),\n CI_calc = \"Can random effect 95% CI be calculated?\") %>% \n gt::tab_spanner(label = \"Random Effects\",\n columns = gt::starts_with(\"random\")) %>% \n gt::sub_missing() %>% \n gt::cols_label_with(columns = gt::starts_with(\"random\"),\n fn = function(x) paste0(\"\")) %>% \n gt::tab_style(locations = \n cells_body(rows = str_detect(dataset, \"Eucalyptus\"),\n columns = dataset),\n style = cell_text(style = \"italic\")) %>% \n gt::text_transform(fn = function(x) str_replace(x, \"publishable_as_is\", \"Categorical Peer Rating\") %>% \n str_replace(., \"rate_analysis\", \"Continuous Peer Rating\"),\n locations = cells_body(columns = c(\"fixed_effects\"))) %>% \n gt::tab_style(style = cell_text(style = \"italic\", transform = \"capitalize\"), \n locations = cells_row_groups(groups = \"Eucalyptus\"))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n \n
Fixed Effect\n Random Effects\n Model converged?Singular Fit?

Can random effects \\(\\text{SE}\\) be calculated?

\n
Can random effect 95% CI be calculated?
blue tit
Categorical Peer RatingReviewer IDyesnoyesyes
Study IDReviewer IDyesnoyesno
Study IDno
Continuous Peer RatingReviewer IDyesnoyesyes
Study IDReviewer IDyesnonono
Study IDyesnonono
Eucalyptus
Categorical Peer RatingReviewer IDyesnoyesyes
Study IDReviewer IDyesyesnono
Study IDno
Continuous Peer RatingReviewer IDyesnoyesyes
Study IDReviewer IDyesnonono
Study IDno
\n
\n```\n\n:::\n:::\n\n\n\n\n\n### Out of sample predictions $y_i$\n\nAs for effect-size estimates $Z_r$, we encountered convergence and singularity problems when fitting models of deviation in out-of-sample predictions $y_i$ explained by categorical peer ratings for both datasets (@tbl-explore-yi-deviation-random-effects-structure). For all continuous models across both datasets, we encountered convergence and singularity problems when including random effects for both effect ID and Reviewer ID, as well as when including random effects for the effect ID only. In the latter case, for many prediction scenarios, across both blue tit and *Eucalyptus* datasets, estimated random effect coefficient CI's and $\\text{SE}$ could not be estimated. For models of deviation in out-of-sample predictions explained by continuous peer review ratings, when a random effect was included for effect ID only, CI's returned values of 0 for both bounds and model means estimated with `modelbased::estimate_means()` could not be reliably estimated and were equal for every peer-rating category (@tbl-explore-yi-deviation-random-effects-structure). Consequently, we fitted models of deviation in out-of-sample predictions explained by continuous peer ratings with a random effect for Reviewer ID only (@tbl-yi-deviation-ratings-convergence-singularity). These model structures matched converging and non-singular model structures for effect-size estimates $Z_r$ (@tbl-explore-Zr-deviation-random-effects-structure).\n\n\n\n\n::: {#tbl-explore-yi-deviation-random-effects-structure .cell tbl-cap='Singularity and convergence checking outcomes for models of deviation in out-of-sample predictions $y_i$ explained by peer-review ratings for different random effect structures. 
Problematic checking outcomes are highlighted in red.'}\n\n```{.r .cell-code}\nall_model_fits_yi <- \n model_vars %>% \n cross_join(., \n {ManyEcoEvo::ManyEcoEvo_yi_results %>% \n select(estimate_type, ends_with(\"set\"), effects_analysis)}) %>% \n ungroup() %>% \n mutate(effects_analysis = \n map(effects_analysis, \n ~ .x %>% \n select(any_of(c(\"id_col\", \"study_id\")),\n starts_with(\"box_cox_abs_dev\"), \n RateAnalysis, \n PublishableAsIs,\n ReviewerId,\n box_cox_var) %>% \n janitor::clean_names() %>%\n mutate_if(is.character, factor) \n ),\n model_workflows = pmap(.l = list(outcome, \n fixed_effects, \n random_intercepts), \n .f = create_model_workflow),\n fitted_mod_workflow = map2(model_workflows, effects_analysis, poss_fit), #NOT MEANT TO BE TEST DAT\n fitted_model = map(fitted_mod_workflow, poss_extract_fit_engine),\n convergence = map(fitted_model, possibly_check_convergence),\n singularity = map(fitted_model, possibly_check_singularity),\n params = map(fitted_model, poss_parameters)) %>% \n mutate(\n across(where(is.list), \n .fns = ~ coalesce(.x, list(NA)))\n ) %>% \n mutate(convergence = list_c(convergence), \n singularity = list_c(singularity)) %>% \n unnest_wider(random_intercepts, names_sep = \"_\") %>% \n select(-outcome, \n -model_workflows, \n -fitted_mod_workflow, \n -effects_analysis,\n estimate_type)\n\nyi_singularity_convergence_all <- \n all_model_fits_yi %>% \n left_join({all_model_fits_yi %>% \n unnest(params) %>% \n filter(Effects == \"random\") %>% \n filter(if_any(contains(\"SE\"), list(is.infinite, is.na))) %>% \n distinct(fixed_effects, \n random_intercepts_1,\n random_intercepts_2, \n dataset, \n estimate_type,\n convergence, \n singularity) %>% \n mutate(SE_calc = FALSE)}) %>% \n left_join({all_model_fits %>% \n unnest(params) %>% \n filter(Effects == \"random\") %>% \n filter(if_any(contains(\"CI\"), list(is.infinite, is.na))) %>% \n distinct(fixed_effects, \n random_intercepts_1,\n random_intercepts_2, \n dataset, \n 
estimate_type,\n convergence, \n singularity) %>% \n mutate(CI_calc = FALSE)}) %>% \n rowwise() %>% \n mutate(across(ends_with(\"_calc\"), \n ~ replace_na(.x, TRUE))) %>% \n mutate(across(c(SE_calc, CI_calc, singularity), ~ ifelse(is_false(convergence), NA, .x))) \n\nyi_singularity_convergence_all %>% \n select(-fitted_model, -params) %>% \n arrange(dataset,\n estimate_type,\n fixed_effects,\n random_intercepts_1,\n random_intercepts_2\n ) %>% \n mutate(across(starts_with(\"random\"), \n ~ str_replace_all(.x, \"_\", \" \") %>%\n Hmisc::capitalize() %>% \n str_replace(\"id\", \"ID\")),\n dataset = \n case_when(dataset == \"eucalyptus\" ~ Hmisc::capitalize(dataset), \n TRUE ~ dataset)) %>% \n mutate(fixed_effects = forcats::fct_recode(fixed_effects, \n \"Categorical Peer Rating\" = \"publishable_as_is\",\n \"Continuous Peer Rating\" = \"rate_analysis\")) %>%\n group_by(fixed_effects) %>% \n arrange(fixed_effects, dataset, pick(starts_with(\"random\"))) %>% \n relocate(estimate_type,.after = dataset) %>%\n gt::gt(rowname_col = \"dataset\") %>% \n gt::text_transform(\n locations = cells_body(\n columns = fixed_effects,\n rows = random_intercepts_1 != \"Reviewer ID\"\n ),\n fn = function(x){\n paste0(\"\")\n }\n ) %>% \n tab_style(\n style = list(\n cell_fill(color = scales::alpha(\"red\", 0.6)),\n cell_text(color = \"white\", weight = \"bold\")\n ),\n locations = list(\n cells_body(columns = \"singularity\", rows = singularity == TRUE),\n cells_body(columns = \"convergence\", rows = convergence == FALSE), #TODO why didn't work here??\n cells_body(columns = \"SE_calc\", rows = SE_calc == FALSE),\n cells_body(columns = \"CI_calc\", rows = CI_calc == FALSE)\n )\n ) %>% \n gt::text_transform(fn = function(x) ifelse(x == TRUE, \"yes\",\n ifelse(x == FALSE, \"no\", x)),\n locations = cells_body(columns = c(\"singularity\", \"convergence\", \"SE_calc\", \"CI_calc\"))) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::cols_label(dataset = \"Dataset\",\n 
estimate_type = \"Prediction Scenario\",\n fixed_effects = \"Fixed Effect\",\n singularity = \"Singular Fit?\",\n convergence = \"Model converged?\",\n SE_calc = gt::md(\"Can random effects $\\\\text{SE}$ be calculated?\"),\n CI_calc = \"Can random effect 95% CI be calculated?\") %>% \n gt::tab_spanner(label = \"Random Effects\",\n columns = gt::starts_with(\"random\")) %>% \n gt::sub_missing() %>% \n gt::cols_label_with(columns = gt::starts_with(\"random\"),\n fn = function(x) paste0(\"\")) %>% \n gt::tab_style(locations = \n cells_body(rows = str_detect(dataset, \"Eucalyptus\"),\n columns = dataset),\n style = cell_text(style = \"italic\")) %>% \n gt::text_transform(fn = function(x) str_replace(x, \"publishable_as_is\", \"Categorical Peer Rating\") %>% \n str_replace(., \"rate_analysis\", \"Continuous Peer Rating\"),\n locations = cells_body(columns = c(\"fixed_effects\"))) %>% \n gt::text_transform(\n locations = cells_stub(\n rows = estimate_type != \"y25\"\n ),\n fn = function(x){\n paste0(\"\")\n }\n ) %>% \n gt::tab_style(locations = cells_stub(rows = str_detect(dataset, \"Eucalyptus\")),\n style = cell_text(style = \"italic\")) %>%\n gt_fmt_yi(columns = \"estimate_type\") \n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n
\n Random Effects\n Prediction ScenarioModel converged?Singular Fit?

Can random effects \\(\\text{SE}\\) be calculated?

\n
Can random effect 95% CI be calculated?
Categorical Peer Rating
EucalyptusReviewer ID$$y_{25}$$yesnoyesyes
Reviewer ID$$y_{50}$$yesnoyesyes
Reviewer ID$$y_{75}$$yesyesnoyes
EucalyptusStudy IDReviewer ID$$y_{25}$$yesnoyesyes
Study IDReviewer ID$$y_{50}$$yesnoyesyes
Study IDReviewer ID$$y_{75}$$yesyesnoyes
EucalyptusStudy ID$$y_{25}$$yesnoyesyes
Study ID$$y_{50}$$yesnoyesyes
Study ID$$y_{75}$$yesnoyesyes
blue titReviewer ID$$y_{25}$$yesyesnoyes
Reviewer ID$$y_{50}$$yesnoyesyes
Reviewer ID$$y_{75}$$yesnoyesyes
blue titStudy IDReviewer ID$$y_{25}$$yesnoyesyes
Study IDReviewer ID$$y_{50}$$yesyesnoyes
Study IDReviewer ID$$y_{75}$$yesnoyesyes
blue titStudy ID$$y_{25}$$yesnoyesyes
Study ID$$y_{50}$$yesnoyesyes
Study ID$$y_{75}$$yesnoyesyes
Continuous Peer Rating
EucalyptusReviewer ID$$y_{25}$$yesnoyesyes
Reviewer ID$$y_{50}$$yesyesnoyes
Reviewer ID$$y_{75}$$yesyesnoyes
EucalyptusStudy IDReviewer ID$$y_{25}$$yesnonoyes
Study IDReviewer ID$$y_{50}$$no
Study IDReviewer ID$$y_{75}$$yesnonoyes
EucalyptusStudy ID$$y_{25}$$yesnonoyes
Study ID$$y_{50}$$yesnonoyes
Study ID$$y_{75}$$yesnoyesyes
blue titReviewer ID$$y_{25}$$yesyesnoyes
Reviewer ID$$y_{50}$$yesnoyesyes
Reviewer ID$$y_{75}$$yesnoyesyes
blue titStudy IDReviewer ID$$y_{25}$$yesnonoyes
Study IDReviewer ID$$y_{50}$$yesnonoyes
Study IDReviewer ID$$y_{75}$$yesnonoyes
blue titStudy ID$$y_{25}$$yesnonoyes
Study ID$$y_{50}$$yesnonoyes
Study ID$$y_{75}$$yesnoyesyes
\n
\n```\n\n:::\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nyi_fitted_mods <- \n ManyEcoEvo::ManyEcoEvo_yi_viz %>% \n filter(model_name %in% c(\"box_cox_rating_cat\", \n \"box_cox_rating_cont\", \n \"sorensen_glm\", \n \"uni_mixed_effects\")) %>% \n select(-ends_with(\"_plot\"), -MA_fit_stats, -contains(\"mod_\")) %>% \n rowwise() %>% \n mutate(singularity = possibly_check_singularity(model), \n convergence = list(possibly_check_convergence(model))) %>% \n ungroup() %>% mutate(\n across(where(is.list), \n .fns = ~ coalesce(.x, list(NA)))\n ) %>% \n mutate(convergence = list_c(convergence),\n singularity = case_when(is.na(convergence) ~ NA,\n TRUE ~ singularity))\n\nyi_convergence_singularity <- \n yi_fitted_mods %>%\n left_join({ # Check if SE and CI can be calculated\n yi_fitted_mods %>% \n unnest(model_params) %>% \n filter(Effects == \"random\") %>% \n filter(if_any(contains(\"SE\"), list(is.infinite, is.na))) %>% \n distinct(dataset, estimate_type, model_name) %>% \n mutate(SE_calc = FALSE)\n }, by = join_by(dataset, estimate_type, model_name)) %>% \n left_join({\n yi_fitted_mods %>% \n unnest(model_params) %>% \n filter(Effects == \"random\") %>% \n filter(if_any(contains(\"CI_\"), list(is.infinite, is.na))) %>% \n distinct(dataset, estimate_type, model_name) %>% \n mutate(CI_calc = FALSE)\n }, by = join_by(dataset, estimate_type, model_name)) %>% \n rowwise() %>%\n mutate(across(ends_with(\"_calc\"), \n ~ replace_na(.x, TRUE)),\n across(c(SE_calc, CI_calc, singularity), ~ ifelse(is_false(convergence) | is_na(convergence), NA, .x)),\n model_name = forcats::as_factor(model_name),\n model_name = forcats::fct_relevel(model_name, \n c(\"box_cox_rating_cat\", \n \"box_cox_rating_cont\",\n \"sorensen_glm\",\n \"uni_mixed_effects\")),\n model_name = \n forcats::fct_recode(\n model_name,\n `Deviation explained by categorical ratings` = \"box_cox_rating_cat\",\n `Deviation explained by continuous ratings` = \"box_cox_rating_cont\",\n `Deviation explained by Sorensen's 
index` = \"sorensen_glm\",\n `Deviation explained by inclusion of random effects` = \n \"uni_mixed_effects\"),\n dataset = case_when(str_detect(dataset, \"eucalyptus\") ~ \"Eucalyptus\",\n TRUE ~ dataset)) %>% \n ungroup() %>% \n select(-model)\n\nyi_singularity_convergence_sorensen_mixed_mod <- \n yi_convergence_singularity %>% \n filter(str_detect(model_name, \"Sorensen\") | str_detect(model_name, \"random\"))\n```\n:::\n\n\n\n\nWe fitted the same deviation models on the out-of-sample-predictions dataset that we fitted for the effect-size dataset. \nHowever, while all models of deviation explained by categorical peer-ratings converged, the following datasets and prediction scenarios suffered from singular fit: blue tit - $y_{25}$, *Eucalyptus* - $y_{75}$ (@tbl-yi-deviation-ratings-convergence-singularity).\nModels of deviation explained by *continuous* ratings all converged; however, for some out-of-sample prediction scenarios the model fit was singular.\nSimilarly to the effect-size ($Z_r$) dataset, $\\text{SD}$ and CI could not be estimated for random effects in some models (@tbl-yi-deviation-ratings-convergence-singularity), consequently we interpreted this to mean the models had singular fit (See @sec-Zr-deviation-ratings). 
\nResults of all deviation models are therefore presented only for models with non-singular fit, and that converged (@tbl-yi-deviation-ratings-convergence-singularity).\n\n\n\n\n\n::: {#tbl-yi-deviation-ratings-convergence-singularity .cell tbl-cap='Singularity and convergence checking for models of deviation in out-of-sample-predictions $y_i$ explained by peer-ratings.'}\n\n```{.r .cell-code}\nyi_convergence_singularity %>%\n filter(stringr::str_detect(model_name, \"ratings\")) %>% \n select(-model_params) %>% \n group_by(model_name) %>% \n gt::gt(rowname_col = \"dataset\") %>% \n gt::tab_style(locations = \n cells_body(rows = str_detect(dataset, \"Eucalyptus\"),\n columns = dataset),\n style = cell_text(style = \"italic\")) %>% \n gt::cols_label(dataset = \"Dataset\",\n estimate_type = \"Prediction Scenario\",\n singularity = \"Singular Fit?\",\n convergence = \"Model converged?\",\n SE_calc = gt::md(\"Can random effects $\\\\text{SE}$ be calculated?\"),\n CI_calc = \"Can random effect CI be calculated?\") %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::text_transform(fn = function(x) ifelse(x == TRUE, \"yes\",\n ifelse(x == FALSE, \"no\", x)),\n locations = cells_body(columns = c(\"singularity\",\n \"convergence\",\n \"SE_calc\", \n \"CI_calc\")\n )) %>% \n gt::text_transform(\n locations = cells_stub(\n rows = estimate_type != \"y25\"\n ),\n fn = function(x){\n paste0(\"\")\n }\n ) %>% \n gt_fmt_yi(\"estimate_type\") %>% \n gt::tab_style(locations = cells_stub(rows = str_detect(dataset, \"Eucalyptus\")),\n style = cell_text(style = \"italic\")) %>% \n tab_style(\n style = list(\n cell_fill(color = scales::alpha(\"red\", 0.6)),\n cell_text(color = \"white\", weight = \"bold\")\n ),\n locations = list(\n cells_body(columns = \"singularity\", rows = singularity == TRUE),\n cells_body(columns = \"convergence\", rows = convergence == FALSE),\n cells_body(columns = \"SE_calc\", rows = SE_calc == FALSE),\n cells_body(columns = \"CI_calc\", rows = 
CI_calc == FALSE)\n )\n ) %>% \n gt::sub_missing()\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n
Prediction ScenarioSingular Fit?Model converged?

Can random effects \\(\\text{SE}\\) be calculated?

\n
Can random effect CI be calculated?
Deviation explained by continuous ratings
blue tit$$y_{25}$$yesyesnono
$$y_{50}$$noyesyesyes
$$y_{75}$$noyesyesyes
Eucalyptus$$y_{25}$$noyesyesyes
$$y_{50}$$yesyesnono
$$y_{75}$$yesyesnono
Deviation explained by categorical ratings
blue tit$$y_{25}$$yesyesnono
$$y_{50}$$noyesyesyes
$$y_{75}$$noyesyesyes
Eucalyptus$$y_{25}$$noyesyesyes
$$y_{50}$$noyesyesyes
$$y_{75}$$yesyesnono
\n
\n```\n\n:::\n:::\n\n\n\n\nGroup means and $95\\%$ confidence intervals for different categories of peer-review rating are all overlapping (@fig-yi-deviation-cat-rating).\nThe fixed effect of peer review rating also explains virtually no variability in deviation scores for out-of-sample predictions $y_i$ (@tbl-yi-deviation-ratings-convergence-singularity).\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nyi_violin_cat_plot_data <- \n ManyEcoEvo::ManyEcoEvo_yi_viz %>% \n filter(model_name %in% \"box_cox_rating_cat\") %>%\n left_join(., \n {ManyEcoEvo::ManyEcoEvo_yi_results %>% \n select(dataset, estimate_type, effects_analysis) %>% \n hoist(effects_analysis, \"lambda\", .transform = unique) %>% \n select(-effects_analysis)}, \n by = join_by(dataset, estimate_type)) %>% \n mutate( dataset = case_when(str_detect(dataset, \"eucalyptus\") ~ \"Eucalyptus\",\n TRUE ~ dataset)) %>% \n semi_join({\n yi_convergence_singularity %>% \n filter( str_detect(model_name, \"categorical\"), \n !singularity, convergence, SE_calc, CI_calc)\n }, by = join_by(\"dataset\", \"estimate_type\")) %>% \n select(dataset, estimate_type, model_name, model) %>% \n mutate(predictor_means = \n map(model, modelbased::estimate_means, backend = \"marginaleffects\" ),\n model_data = map(model, ~pluck(.x, \"frame\") %>% \n drop_na() %>% \n as_tibble()),\n plot_name = paste(dataset, \n estimate_type,\n \"violin_cat\",\n sep = \"_\")) %>% \n mutate(model_data = map(model_data, \n .f = ~ mutate(.x, PublishableAsIs =\n str_replace(PublishableAsIs,\n \"publishable with \", \"\") %>%\n str_replace(\"deeply flawed and \", \"\") %>% \n capwords())),\n predictor_means = map(predictor_means,\n .f = ~ mutate(.x, PublishableAsIs =\n str_replace(PublishableAsIs,\n \"publishable with \", \"\") %>%\n str_replace(\"deeply flawed and \", \"\") %>% \n capwords()))) %>% \n select(-model)\n\nyi_violin_cat_plots <- yi_violin_cat_plot_data %>% \n pmap(.l = list(.$model_data, .$predictor_means, .$plot_name),\n .f = ~ 
plot_model_means_box_cox_cat(..1, \n PublishableAsIs, \n ..2,\n new_order = \n c(\"Unpublishable\",\n \"Major Revision\",\n \"Minor Revision\",\n \"Publishable As Is\"),\n ..3)) %>% \n purrr::set_names({yi_violin_cat_plot_data %>% \n pull(plot_name) %>% \n stringr::str_split(\"_violin_cat\", 2) %>% \n map_chr(pluck, 1) })\n\n\nsubfigcaps_yi_cat <- yi_violin_cat_plot_data %>% \n mutate(dataset = \n case_when(dataset == \"Eucalyptus\" ~ paste0(\"*\", dataset, \"*\"), \n TRUE ~ Hmisc::capitalize(dataset))) %>% \n unite(plot_name, dataset, estimate_type, sep = \", \") %>% \n pull(plot_name)\n\nfig_cap_yi_deviation_cat_rating <- \n paste0(\"Violin plot of Box-Cox transformed deviation from the meta-analytic mean for $y_i$ estimates as a function of categorical peer-review ratings ratings. Note that higher (less negative) values of the deviation score result from greater deviation from the meta-analytic mean. Grey points for each rating group denote model-estimated marginal mean deviation, and error bars denote 95% CI of the estimate. \", subfigcaps_yi_cat %>% \n paste0(paste0(paste0(\"**\", LETTERS[1:length(subfigcaps_yi_cat)], \"**\", sep = \"\"), sep = \": \"), ., collapse = \", \"), \".\")\n```\n:::\n\n::: {.cell .column-body-outset}\n\n```{.r .cell-code}\nlibrary(patchwork)\npatchwork::wrap_plots(yi_violin_cat_plots, ncol = 2, nrow = 2, guides = 'collect') +\n patchwork::plot_annotation(tag_levels = 'A')\n```\n\n::: {.cell-output-display}\n![Violin plot of Box-Cox transformed deviation from the meta-analytic mean for $y_i$ estimates as a function of categorical peer-review ratings ratings. Note that higher (less negative) values of the deviation score result from greater deviation from the meta-analytic mean. Grey points for each rating group denote model-estimated marginal mean deviation, and error bars denote 95% CI of the estimate. 
**A**: Blue tit, y50, **B**: Blue tit, y75, **C**: *Eucalyptus*, y25, **D**: *Eucalyptus*, y50.](SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-1.png){#fig-yi-deviation-cat-rating width=960}\n:::\n:::\n\n\n\n\nThere was a lack of any clear relationships between quantitative review scores and $y_i$ deviation scores (@tbl-yi-deviation-parameter-estimates).\nPlots of these relationships indicated either no relationship or extremely weak positive relationships (@fig-yi-deviation-cont-rating).\nRecall that positive relationships mean that as review scores became more favorable, the deviation from the meta-analytic mean increased, which is surprising.\nBecause almost no variability in $y_i$ deviation score was explained by reviewer ratings (@tbl-yi-deviation-parameter-estimates), this pattern does not appear to merit further consideration.\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nyi_cont_plot_data <-\n ManyEcoEvo::ManyEcoEvo_yi_viz %>% \n filter(model_name %in% c(\"box_cox_rating_cont\")) %>% \n mutate(dataset = case_match(dataset, \"eucalyptus\" ~ \"Eucalyptus\",.default = dataset)) %>% \n semi_join({yi_convergence_singularity %>% \n filter( str_detect(model_name, \"cont\"), # Omit all in-estimable models\n !singularity, \n convergence, \n #SE_calc, \n #CI_calc\n )}, \n by = join_by(\"dataset\", \"estimate_type\")) %>% \n select(dataset, estimate_type, model_name, model) %>% \n mutate(plot_data = map(model, pluck, \"frame\")) \n\nsubfigcaps <- yi_cont_plot_data %>% \n mutate(dataset = \n case_when(dataset == \"Eucalyptus\" ~ paste0(\"*\", dataset, \"*\"), \n TRUE ~ Hmisc::capitalize(dataset))) %>% \n unite(plot_name, dataset, estimate_type, sep = \", \") %>% \n pull(plot_name)\n\nfig_cap_yi_deviation_cont_rating <- \n paste0(\"Scatterplots explaining Box-Cox transformed deviation from the meta-analytic mean for $y_i$ estimates as a function of continuous ratings. 
Note that higher (less negative) values of the deviation score result from greater deviation from the meta-analytic mean. \", subfigcaps %>% \n paste0(paste0(paste0(\"**\", LETTERS[1:length(subfigcaps)], \"**\", sep = \"\"), sep = \": \"), ., collapse = \", \"), \".\")\n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nyi_cont_plots <- \n yi_cont_plot_data$plot_data %>% \n map(.f = ~ plot_continuous_rating(.x)) %>% \n purrr::set_names({yi_cont_plot_data %>% \n unite(plot_name, dataset, estimate_type, sep = \" \") %>% \n pull(plot_name)})\n\npatchwork::wrap_plots(yi_cont_plots, heights = 4, byrow = TRUE) +\n patchwork::plot_annotation(tag_levels = 'A')\n```\n\n::: {.cell-output-display}\n![Scatterplots explaining Box-Cox transformed deviation from the meta-analytic mean for $y_i$ estimates as a function of continuous ratings. Note that higher (less negative) values of the deviation score result from greater deviation from the meta-analytic mean. **A**: Blue tit, y50, **B**: Blue tit, y75, **C**: *Eucalyptus*, y25.](SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cont-rating-1.png){#fig-yi-deviation-cont-rating width=768}\n:::\n:::\n\n::: {#tbl-yi-deviation-model-params .cell .column-page tbl-cap='Parameter estimates for univariate models of Box-Cox transformed deviation from the mean $y_i$ estimate as a function of categorical peer-review rating, continuous peer-review rating, and Sorensen\\'s index for blue tit and *Eucalyptus* analyses, and also for the inclusion of random effects for *Eucalyptus* analyses.'}\n\n```{.r .cell-code}\nManyEcoEvo_yi_viz %>%\n filter(\n model_name %nin% c(\"MA_mod\",\n \"box_cox_rating_cat_no_int\",\n \"MA_mod_mv\")) %>% \n mutate( dataset = case_when(str_detect(dataset, \"eucalyptus\") ~ \"Eucalyptus\",\n TRUE ~ dataset),\n model_name = forcats::as_factor(model_name) %>% \n forcats::fct_relevel(c(\"box_cox_rating_cat\", \n \"box_cox_rating_cont\", \n \"sorensen_glm\", \n \"uni_mixed_effects\")) %>% \n forcats::fct_recode(\n 
`Deviation explained by categorical ratings` = \"box_cox_rating_cat\",\n `Deviation explained by continuous ratings` = \"box_cox_rating_cont\",\n `Deviation explained by Sorensen's index` = \"sorensen_glm\",\n `Deviation explained by inclusion of random effects` = \"uni_mixed_effects\")\n ) %>% \n semi_join(\n {yi_convergence_singularity %>% \n filter(!singularity, \n convergence, \n SE_calc, \n CI_calc) },\n by = join_by(\"dataset\", \"estimate_type\", \"model_name\")\n ) %>% \n select(dataset, \n estimate_type,\n model_name, \n model_params) %>% \n unnest(model_params) %>% \n mutate(\n Group = case_match(Group,\n \"study_id\" ~ \"Effect ID\",\n \"ReviewerId\" ~ \"Reviewer ID\",\n \"\" ~ NA,\n .default = Group),\n df_error = as.integer(df_error),\n Parameter = str_remove(Parameter, \"PublishableAsIs\") %>% \n str_replace(\"diversity\", \"Sorensen's\") %>% \n str_replace_all(., \"_\", \" \") %>%\n str_remove(., \"1\") %>% \n Hmisc::capitalize() ) %>%\n group_by(model_name) %>% \n arrange(model_name, \n dataset, estimate_type) %>%\n select(-CI) %>% \n gt::gt(rowname_col = \"dataset\") %>% \n gt::fmt(columns = \"p\",\n fns = function(x) gtsummary::style_pvalue(x)) %>% \n gt::cols_label(CI_low = gt::md(\"95\\\\%CI\"),\n estimate_type = \"Prediction Scenario\",\n SE = gt::md(\"$\\\\text{SE}$\"),\n df_error = gt::md(\"$\\\\mathit{df}$\"),\n t = gt::md(\"$t$\"),\n p = gt::md(\"*p*\")) %>% \n gt::cols_merge(columns = starts_with(\"CI_\"), \n pattern = \"[{1},{2}]\") %>% \n gt::cols_move(columns = CI_low, after = SE) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::fmt(columns = c(Coefficient, SE, starts_with(\"CI_\"), t) ,\n rows = Parameter %nin% c(\"RateAnalysis\", \"SD (Observations)\", \"mixed_model1\"),\n fns = function(x) format(round(x, 2),nsmall = 2)) %>%\n gt::fmt(columns = c(Coefficient, SE, t, starts_with(\"CI_\")) ,\n rows = Parameter %in% c(\"RateAnalysis\", \"SD (Observations)\", \"mixed_model1\"),\n fns = function(x) ifelse(x < 0.0009, \n 
format(x, nsmall = 2, digits = 1),\n round(x, digits = 2))) %>%\n gt::cols_move(columns = c(Effects, Group), after = Parameter) %>% \n gt::sub_missing(columns = c(Effects, Group, t, df_error, p), \n missing_text = \"\") %>% \n gt::cols_hide(Effects) %>% \n gt::text_transform(fn = function(x) map(x, gt::md), \n locations = gt::cells_row_groups()) %>% \n gt::text_transform(\n locations = cells_stub(\n rows = Parameter != \"(Intercept)\"\n ),\n fn = function(x){\n paste0(\"\")\n }\n ) %>% \n gt::fmt_number(columns = c(Coefficient, SE, t, starts_with(\"CI_\")), decimals = 2,drop_trailing_zeros = TRUE, drop_trailing_dec_mark = TRUE) %>% \n gt::fmt_scientific(columns = c( starts_with(\"CI_\")),\n rows = abs(CI_low) < 0.01 | abs(CI_high) < 0.01 | abs(CI_low) > 1000 | abs(CI_high) > 1000,\n decimals = 2) %>%\n gt::fmt_scientific(columns = c( starts_with(\"Coefficient\")),\n rows = abs(Coefficient) < 0.01 | abs(Coefficient) > 1000,\n decimals = 2) %>%\n gt::fmt_scientific(columns = c( starts_with(\"SE\")),\n rows = abs(SE) < 0.01 | abs(SE) > 1000,\n decimals = 2) %>%\n gt::tab_style(locations = gt::cells_stub(rows = str_detect(dataset, \"Eucalyptus\")),\n style = cell_text(style = \"italic\")) %>% \n gt::cols_label(Group = \"Random Effect\") %>% \n gt_fmt_yi(\"estimate_type\")\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n \n \n \n
Prediction ScenarioParameterRandom EffectCoefficient

\\(\\text{SE}\\)

\n

95%CI

\n

\\(t\\)

\n

\\(\\mathit{df}\\)

\n

p

\n

Deviation explained by categorical ratings

\n
Eucalyptus$$y_{25}$$(Intercept)
−0.350.32[−0.98,0.28]−1.11680.3
$$y_{25}$$Publishable with major revision
0.290.35[−0.39,0.98]0.841680.4
$$y_{25}$$Publishable with minor revision
0.410.34[−0.26,1.07]1.211680.2
$$y_{25}$$Publishable as is
0.350.36[−0.35,1.06]0.991680.3
$$y_{25}$$SD (Intercept)Reviewer ID0.230.15[0.07,0.79]


$$y_{25}$$SD (Observations)Residual0.920.06[0.82,1.04]


Eucalyptus$$y_{50}$$(Intercept)
−0.610.4[−1.4,0.18]−1.521650.13
$$y_{50}$$Publishable with major revision
0.180.44[−0.69,1.05]0.41650.7
$$y_{50}$$Publishable with minor revision
0.250.42[−0.59,1.09]0.581650.6
$$y_{50}$$Publishable as is
0.090.47[−0.83,1.01]0.191650.8
$$y_{50}$$SD (Intercept)Reviewer ID0.120.42[1.50 × 10−4,1.02 × 102]


$$y_{50}$$SD (Observations)Residual1.290.08[1.14,1.46]


blue tit$$y_{50}$$(Intercept)
−1.230.29[−1.81,−0.65]−4.18218<0.001
$$y_{50}$$Publishable with major revision
−0.210.3[−0.81,0.39]−0.692180.5
$$y_{50}$$Publishable with minor revision
−0.250.3[−0.85,0.34]−0.832180.4
$$y_{50}$$Publishable as is
−0.420.32[−1.05,0.21]−1.332180.2
$$y_{50}$$SD (Intercept)Reviewer ID0.220.09[0.11,0.47]


$$y_{50}$$SD (Observations)Residual0.710.04[0.64,0.8]


blue tit$$y_{75}$$(Intercept)
−1.510.27[−2.05,−0.97]−5.52231<0.001
$$y_{75}$$Publishable with major revision
0.090.28[−0.46,0.64]0.332310.7
$$y_{75}$$Publishable with minor revision
0.350.28[−0.2,0.9]1.252310.2
$$y_{75}$$Publishable as is
0.390.29[−0.19,0.97]1.332310.2
$$y_{75}$$SD (Intercept)Reviewer ID0.290.06[0.18,0.44]


$$y_{75}$$SD (Observations)Residual0.630.03[0.57,0.7]


Deviation explained by continuous ratings

\n
Eucalyptus$$y_{25}$$(Intercept)
−0.460.26[−0.97,0.06]−1.761700.081
$$y_{25}$$RateAnalysis
6.14 × 10−33.54 × 10−3[−8.61 × 10−4,1.31 × 10−2]1.731700.085
$$y_{25}$$SD (Intercept)Reviewer ID0.120.21[4.27 × 10−3,3.64]


$$y_{25}$$SD (Observations)Residual0.930.06[0.82,1.05]


blue tit$$y_{50}$$(Intercept)
−1.350.24[−1.82,−0.88]−5.65220<0.001
$$y_{50}$$RateAnalysis
−1.82 × 10−33.05 × 10−3[−7.83 × 10−3,4.18 × 10−3]−0.62200.6
$$y_{50}$$SD (Intercept)Reviewer ID0.240.08[0.12,0.47]


$$y_{50}$$SD (Observations)Residual0.710.04[0.64,0.79]


blue tit$$y_{75}$$(Intercept)
−1.660.22[−2.1,−1.23]−7.52233<0.001
$$y_{75}$$RateAnalysis
5.62 × 10−32.79 × 10−3[1.22 × 10−4,1.11 × 10−2]2.012330.045
$$y_{75}$$SD (Intercept)Reviewer ID0.30.06[0.2,0.45]


$$y_{75}$$SD (Observations)Residual0.630.03[0.57,0.7]


Deviation explained by Sorensen’s index

\n
Eucalyptus$$y_{25}$$(Intercept)
−0.31.48[−3.2,2.59]−0.21360.8
$$y_{25}$$Mean Sorensen's index
0.442.19[−3.86,4.74]0.2360.8
Eucalyptus$$y_{50}$$(Intercept)
−1.322.1[−5.43,2.79]−0.63360.5
$$y_{50}$$Mean Sorensen's index
1.323.16[−4.87,7.51]0.42360.7
Eucalyptus$$y_{75}$$(Intercept)
−0.711.78[−4.19,2.78]−0.4360.7
$$y_{75}$$Mean Sorensen's index
0.342.71[−4.96,5.64]0.1236>0.9
blue tit$$y_{25}$$(Intercept)
−0.770.6[−1.94,0.4]−1.29610.2
$$y_{25}$$Mean Sorensen's index
−0.231.04[−2.27,1.82]−0.22610.8
blue tit$$y_{50}$$(Intercept)
−0.430.73[−1.86,0.99]−0.6580.6
$$y_{50}$$Mean Sorensen's index
−1.771.27[−4.26,0.71]−1.4580.2
blue tit$$y_{75}$$(Intercept)
−1.720.74[−3.16,−0.28]−2.34610.019
$$y_{75}$$Mean Sorensen's index
0.781.28[−1.73,3.29]0.61610.5

Deviation explained by inclusion of random effects

\n
Eucalyptus$$y_{25}$$(Intercept)
−0.530.26[−1.05,−0.02]−2.03360.042
$$y_{25}$$Mixed model
0.740.31[0.13,1.35]2.37360.018
Eucalyptus$$y_{50}$$(Intercept)
−0.570.4[−1.36,0.21]−1.43360.2
$$y_{50}$$Mixed model
0.180.48[−0.75,1.11]0.38360.7
Eucalyptus$$y_{75}$$(Intercept)
−0.350.39[−1.11,0.41]−0.9360.4
$$y_{75}$$Mixed model
−0.190.46[−1.1,0.71]−0.41360.7
\n
\n```\n\n:::\n:::\n\n\n\n\n## Deviation scores as explained by the distinctiveness of variables in each analysis\n\n### Out of sample predictions $y_i$ {#sec-sorensen-yi}\n\nGiven the convergence and singularity issues encountered with most other analyses, we also checked for convergence and singularity issues in models of deviation explained by Sorensen's similarity index for $y_i$ estimates (@tbl-deviation-similarity-convergence-singularity-yi).\nAll models fitted without problem.\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nyi_sorensen_plot_data <- \n ManyEcoEvo_yi_viz %>% \n filter(str_detect(model_name, \"sorensen_glm\")) %>% \n mutate( dataset = \n case_when(str_detect(dataset, \"eucalyptus\") ~ \"Eucalyptus\",\n TRUE ~ dataset),\n model_name = forcats::as_factor(model_name) %>% \n forcats::fct_relevel(c(\"box_cox_rating_cat\", \n \"box_cox_rating_cont\", \n \"sorensen_glm\", \n \"uni_mixed_effects\")) %>% \n forcats::fct_recode(\n `Deviation explained by categorical ratings` = \"box_cox_rating_cat\",\n `Deviation explained by continuous ratings` = \"box_cox_rating_cont\",\n `Deviation explained by Sorensen's index` = \"sorensen_glm\",\n `Deviation explained by inclusion of random effects` = \"uni_mixed_effects\")) %>% \n select(dataset, estimate_type, model_name, model) %>% \n semi_join(\n {yi_convergence_singularity %>% \n filter(!singularity, \n convergence, \n SE_calc, CI_calc) },\n by = join_by(\"dataset\", \"estimate_type\", \"model_name\")\n ) %>% \n mutate(\n plot_data = map(model, ~ pluck(.x, \"fit\", \"data\") %>% \n rename(box_cox_abs_deviation_score_estimate = ..y))) %>% \n unite(plot_names, dataset, estimate_type, sep = \", \")\n\nyi_sorensen_subfigcaps <- \n yi_sorensen_plot_data$plot_names %>% \n paste0(paste0(paste0(\"**\", LETTERS[1:length(yi_sorensen_plot_data$plot_names)], \"**\", sep = \"\"), sep = \": \"), ., collapse = \", \")\n\nyi_sorensen_fig_cap <- paste0(\"Scatter plots examining Box-Cox transformed deviation from the meta-analytic 
mean for $y_i$ estimates as a function of Sorensen's similarity index. Note that higher (less negative) values of the deviation score result from greater deviation from the meta-analytic mean (@fig-box-cox-transformations). \",\n yi_sorensen_plot_data$plot_names %>% paste0(paste0(paste0(\"**\", LETTERS[1:length(yi_sorensen_plot_data$plot_names)], \"**\", sep = \"\"), sep = \": \"), ., collapse = \", \"),\n \".\")\n```\n:::\n\n::: {.cell layout-nrow=\"2\"}\n\n```{.r .cell-code}\nyi_sorensen_plots <- \n map2(.x = yi_sorensen_plot_data$model, \n .y = yi_sorensen_plot_data$plot_data,\n .f = ~ walk_plot_effects_diversity(model = .x, plot_data = .y)) %>% \n purrr::set_names(yi_sorensen_plot_data$plot_names)\n\npatchwork::wrap_plots(yi_sorensen_plots,heights = 4, byrow = TRUE) +\n patchwork::plot_annotation(tag_levels = 'A')\n```\n\n::: {.cell-output-display}\n![Scatter plots examining Box-Cox transformed deviation from the meta-analytic mean for $y_i$ estimates as a function of Sorensen's similarity index. Note that higher (less negative) values of the deviation score result from greater deviation from the meta-analytic mean (@fig-box-cox-transformations). **A**: blue tit, y25, **B**: blue tit, y50, **C**: blue tit, y75, **D**: Eucalyptus, y25, **E**: Eucalyptus, y50, **F**: Eucalyptus, y75.](SM3_ExplainingDeviation_files/figure-html/fig-yi-sorensen-plots-1.png){#fig-yi-sorensen-plots width=672}\n:::\n:::\n\n\n\n\nWe checked the fitted models for the inclusion of random effects for the *Eucalyptus* dataset, and for models of deviation explained by Sorensen's similarity index for $y_i$ estimates (@tbl-deviation-similarity-convergence-singularity-yi). All models converged, and no singular fits were encountered.\n\n\n\n\n::: {#tbl-deviation-similarity-convergence-singularity-yi .cell tbl-cap='Singularity and convergence checks for models of deviation explained by Sorensen\\'s similarity index and inclusion of random effects for out-of-sample predictions, $y_i$. 
Models of Deviation explained by inclusion of random effects are not presented for blue tit analyses because the number of models not using random effects was less than our preregistered threshold.'}\n\n```{.r .cell-code}\nyi_singularity_convergence_sorensen_mixed_mod %>% \n drop_na(convergence) %>% \n mutate(across(c(SE_calc, CI_calc, singularity), ~ ifelse(is_false(convergence), NA, .x))) %>% \n select(-model_params) %>% \n group_by(model_name) %>% \n gt::gt(rowname_col = \"dataset\") %>% \n gt::tab_style(locations = cells_body(rows = str_detect(dataset, \"Eucalyptus\"),\n columns = dataset),\n style = cell_text(style = \"italic\")) %>% \n gt::cols_label(dataset = \"Dataset\",\n estimate_type = \"Prediction Scenario\",\n singularity = \"Singular Fit?\",\n convergence = \"Model converged?\",\n SE_calc = gt::md(\"Can random effects $\\\\text{SE}$ be calculated?\"),\n CI_calc = \"Can random effects CI be calculated?\") %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n tab_style(\n style = list(\n cell_fill(color = scales::alpha(\"red\", 0.6)),\n cell_text(color = \"white\", weight = \"bold\")\n ),\n locations = list(\n cells_body(columns = \"singularity\", rows = singularity == TRUE),\n cells_body(columns = \"convergence\", rows = convergence == FALSE),\n cells_body(columns = \"SE_calc\", rows = SE_calc == FALSE),\n cells_body(columns = \"CI_calc\", rows = CI_calc == FALSE)\n )) %>% \n gt::text_transform(fn = function(x) ifelse(x == TRUE, \"yes\",\n ifelse(x == FALSE, \"no\", x)),\n locations = cells_body(columns = c(\"singularity\",\n \"convergence\", \n \"SE_calc\", \n \"CI_calc\")\n )) %>% \n gt::text_transform(\n locations = cells_stub(\n rows = estimate_type != \"y25\"\n ),\n fn = function(x){\n paste0(\"\")\n }\n ) %>% \n gt_fmt_yi(\"estimate_type\") %>%\n gt::tab_style(locations = cells_stub(rows = str_detect(dataset, \"Eucalyptus\")),\n style = cell_text(style = \"italic\"))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n
Prediction ScenarioSingular Fit?Model converged?

Can random effects \\(\\text{SE}\\) be calculated?

\n
Can random effects CI be calculated?
Deviation explained by Sorensen's index
blue tit$$y_{25}$$noyesyesyes
$$y_{50}$$noyesyesyes
$$y_{75}$$noyesyesyes
Eucalyptus$$y_{25}$$noyesyesyes
$$y_{50}$$noyesyesyes
$$y_{75}$$noyesyesyes
Deviation explained by inclusion of random effects
Eucalyptus$$y_{25}$$noyesyesyes
$$y_{50}$$noyesyesyes
$$y_{75}$$noyesyesyes
\n
\n```\n\n:::\n:::\n\n\n\n\n## Deviation scores as explained by the inclusion of random effects\n\n### Out of sample predictions $y_i$\n\nOnly 1 of the Blue tit out-of-sample analyses $y_i$ included random effects, which was below our preregistered threshold of 5 for running the models of Box-Cox transformed deviation from the meta-analytic mean explained by the inclusion of random-effects. However, 14 *Eucalyptus* analyses included in the out-of-sample $y_{i}$ results included only fixed effects, which crossed our pre-registered threshold.\n\nConsequently, we performed this analysis for the *Eucalyptus* dataset only, here we present results for the out of sample prediction $y_{i}$ results.\nThere is inconsistent evidence of somewhat higher Box-Cox-transformed deviation values for models including a random effect, meaning the analyses of the *Eucalyptus* dataset that included random effects averaged slightly higher deviation from the meta-analytic mean out-of-sample estimate in the relevant prediction scenario.\nThis is most evident for the $y_{25}$ predictions which both shows the greatest difference in Box-Cox transformed deviation values (@fig-yi-euc-deviation-RE-plots) and explains the most variation in $y_i$ deviation score (@tbl-yi-deviation-model-params).\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nyi_deviation_RE_plot_data <- \n ManyEcoEvo_yi_results %>% \n mutate(dataset = Hmisc::capitalize(dataset)) %>% \n semi_join({yi_singularity_convergence_sorensen_mixed_mod %>% filter(!singularity, convergence, SE_calc, CI_calc, str_detect(model_name, \"random\"))}, by = join_by(dataset, estimate_type)) %>% \n select(dataset, estimate_type, model = uni_mixed_effects) %>% \n rowwise() %>% \n filter(!is_logical(model)) %>% ungroup %>% \n mutate(predictor_means = map(model, .f = ~ pluck(.x, \"fit\") %>% \n modelbased::estimate_means(.)),\n plot_data = map(model, pluck, \"fit\", \"data\"),\n plot_data = map(plot_data, \n rename, \n box_cox_abs_deviation_score_estimate = 
..y)) %>% \n mutate(dataset = case_when(str_detect(dataset, \"Eucalyptus\") ~ paste0(\"*\", dataset, \"*\"), TRUE ~ dataset)) %>% \n unite(plot_names, dataset, estimate_type, sep = \", \")\n\nyi_deviation_RE_plot_subfigcaps <- yi_deviation_RE_plot_data %>% \n pull(plot_names)\n\nyi_deviation_RE_plot_figcap <- \n paste0(\"Violin plot of Box-Cox transformed deviation from meta-analytic mean as a function of presence or absence of random effects in the analyst's model. White points for each rating group denote model-estimated marginal mean deviation, and error bars denote 95% CI of the estimate. Note that higher (less negative) values of Box-Cox transformed deviation result from greater deviation from the meta-analytic mean. \",\n yi_deviation_RE_plot_data %>% \n pull(plot_names) %>% \n paste0(paste0(paste0(\"**\", LETTERS[1:nrow(yi_deviation_RE_plot_data)], \"**\", sep = \"\"), sep = \": \"), ., collapse = \", \"),\n \".\")\n```\n:::\n\n::: {.cell .column-body-outset layout-nrow=\"1\"}\n\n```{.r .cell-code}\nyi_deviation_RE_plots <- \n yi_deviation_RE_plot_data %>% \n map2(.x = .$plot_data, .y = .$predictor_means, \n .f = ~ plot_model_means_RE(.x, mixed_model, .y)) %>% \n set_names(yi_deviation_RE_plot_subfigcaps)\n\npatchwork::wrap_plots(yi_deviation_RE_plots, byrow = TRUE) +\n patchwork::plot_annotation(tag_levels = 'A') +\n patchwork::plot_layout(guides = 'collect') &\n theme(legend.position = \"bottom\", axis.ticks = element_blank()) &\n xlab(NULL)\n```\n\n::: {.cell-output-display}\n![Violin plot of Box-Cox transformed deviation from meta-analytic mean as a function of presence or absence of random effects in the analyst's model. White points for each rating group denote model-estimated marginal mean deviation, and error bars denote 95% CI of the estimate. Note that higher (less negative) values of Box-Cox transformed deviation result from greater deviation from the meta-analytic mean. 
**A**: *Eucalyptus*, y25, **B**: *Eucalyptus*, y50, **C**: *Eucalyptus*, y75.](SM3_ExplainingDeviation_files/figure-html/fig-yi-euc-deviation-RE-plots-1.png){#fig-yi-euc-deviation-RE-plots width=768}\n:::\n:::\n\n\n\n\n## Multivariate Analysis\n\n### Effect Sizes $Z_r$\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nfilter_vars <- rlang::exprs(exclusion_set == \"complete\",\n expertise_subset == \"All\",\n publishable_subset == \"All\",\n collinearity_subset == \"All\")\n\nmultivar_mods <- \n ManyEcoEvo_viz %>% \n dplyr::filter(!!!filter_vars, model_name == \"MA_mod_mv\") %>% \n hoist(mod_fit_stats, \"R2_conditional\", \"R2_marginal\", \"Sigma\")\n\nbt_multivar_mod_R <- \n multivar_mods %>% \n ungroup %>% \n filter(dataset == \"blue tit\") %>% \n select(R2_marginal, R2_conditional) %>% \n transpose() %>% \n flatten_dbl()\n\neuc_multivar_mod_R <- \n multivar_mods %>% \n ungroup %>% \n filter(dataset == \"eucalyptus\") %>% \n select(R2_marginal, R2_conditional) %>% \n transpose() %>% \n flatten_dbl()\n\nbt_multivar_mod_sigma <- multivar_mods %>% \n filter(dataset == \"blue tit\") %>% \n round_pluck(\"Sigma\")\n\neuc_multivar_mod_sigma <- multivar_mods %>% \n filter(dataset == \"eucalyptus\") %>% \n round_pluck(\"Sigma\")\n```\n:::\n\n::: {#tbl-multivariate-models-coefs .cell .column-body-outset tbl-cap='Parameter estimates from models explaining Box-Cox transformed deviation scores from the mean $Z_r$ as a function of continuous and categorical peer-review ratings in multivariate analyses. 
Standard Errors ($SE$), 95% confidence intervals (95% CI) are reported for all estimates, while $\\mathit{t}$ values, degrees of freedom ($\\mathit{df}$) and $p$-values are presented for fixed-effects.'}\n\n```{.r .cell-code}\nmultivar_mods %>% \n select(dataset, model_params) %>% \n unnest(model_params) %>% \n select(-CI) %>% \n mutate(\n dataset = \n str_replace(dataset, \"eucalyptus\", \"*Eucalyptus*\"), \n Parameter = \n str_replace(Parameter, \"mixed_model\", \"random_included\")) %>% \n group_by(dataset) %>% \n gt::gt() %>% \n gt::fmt_number(columns = c(Coefficient, SE, starts_with(\"CI_\"), t), \n decimals = 2, \n drop_trailing_zeros = TRUE, \n drop_trailing_dec_mark = TRUE) %>%\n gt::fmt_scientific(\n columns = c( starts_with(\"CI_\")),\n rows = abs(CI_low) < 0.01 | abs(CI_high) < 0.01 | abs(CI_low) > 1000 | abs(CI_high) > 1000,\n decimals = 2) %>%\n gt::fmt_scientific(\n columns = c( starts_with(\"Coefficient\")),\n rows = abs(Coefficient) < 0.01 | abs(Coefficient) > 1000,\n decimals = 2) %>%\n gt::fmt(columns = \"p\",\n fns = function(x) gtsummary::style_pvalue(x, \n prepend_p = TRUE)\n ) %>% \n gt::cols_label(CI_low = gt::md(\"95\\\\%CI\"),\n df_error = \"df\",\n p = gt::md(\"*p*\"),\n SE = gt::md(\"$\\\\text{SE}$\")) %>% \n gt::cols_merge(columns = starts_with(\"CI_\"), \n pattern = \"[{1},{2}]\") %>% \n gt::cols_move(columns = CI_low, after = SE) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::cols_move(columns = c(Effects, Group), after = Parameter) %>% \n gt::text_transform(fn = function(x){\n str_remove(x, \"PublishableAsIs\") %>% \n str_replace_all(\"_\", \" \") %>% \n str_replace(\"diversity\", \"Sorensen's\") %>% \n Hmisc::capitalize()\n },\n locations = cells_body(columns = Parameter)) %>% \n gt::text_transform(fn = function(x) str_replace(x, \"ReviewerId\", \"Reviewer ID\")) %>% \n gt::text_transform(fn = function(x) map(x, gt::md), \n locations = gt::cells_row_groups()) %>% \n gt::sub_missing(missing_text = \"\") %>% \n 
gt::cols_hide(Effects) %>% \n gt::cols_label(Group = \"Random Effect\")\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n \n
ParameterRandom EffectCoefficient

\\(\\text{SE}\\)

\n

95%CI

\n
tdf

p

\n

blue tit

\n
(Intercept)−1.240.24[−1.71,−0.77]−5.16465p<0.001
RateAnalysis−2.84 × 10−30[−7.81 × 10−3,2.13 × 10−3]−1.12465p=0.3
Publishable as is0.060.2[−0.33,0.45]0.3465p=0.8
Publishable with major revision−0.090.14[−0.36,0.18]−0.67465p=0.5
Publishable with minor revision−0.020.17[−0.36,0.31]−0.14465p=0.9
Mean Sorensen's index0.330.27[−0.2,0.87]1.22465p=0.2
SD (Intercept)Reviewer ID0.160.03[0.11,0.25]


SD (Observations)Residual0.50.02[0.46,0.53]


Eucalyptus

\n
(Intercept)−2.80.77[−4.32,−1.27]−3.61337p<0.001
RateAnalysis−0.010.01[−2.27 × 10−2,1.74 × 10−3]−1.69337p=0.093
Publishable as is0.760.54[−0.3,1.82]1.42337p=0.2
Publishable with major revision0.650.36[−0.06,1.35]1.79337p=0.074
Publishable with minor revision0.550.44[−0.32,1.41]1.24337p=0.2
Mean Sorensen's index0.360.91[−1.43,2.15]0.39337p=0.7
Random included0.190.2[−0.2,0.59]0.97337p=0.3
SD (Intercept)Reviewer ID0.380.09[0.24,0.61]


SD (Observations)Residual1.060.04[0.98,1.15]


\n
\n```\n\n:::\n\n```{.r .cell-code}\nmultivar_mod_tidy <- multivar_mods %>% \n pull(model, name = \"dataset\") %>% \n map_dfr(broom.mixed::tidy, conf.int = TRUE, .id = \"dataset\")\n\nmultivar_performance_tidy <- multivar_mods %>% \n pull(model, name = \"dataset\") %>% \n map_dfr(performance::performance, .id = \"dataset\")\n```\n:::\n\n\n\n\nThe multivariate models did a poor job of explaining how different from the meta-analytic mean each analysis would be.\nFor the blue tit analyses the $R^{2}$ value for the whole model was 0.11 and for the fixed effects component was 0.01, and the residual standard deviation for the model was 0.5.\nFurther, all of the fixed effects had 95% confidence intervals that overlapped 0.\nThis evidence is all consistent with none of the predictor variables in this model (continuous review rating, categorical review rating, distinctiveness of variables included) having any meaningful effect on how far $Z_r$ estimates fell from the meta-analytic mean for the blue tit analyses.\nThe pattern is largely similar for the *Eucalyptus* multivariate analysis, in which $R^{2}$ for the whole model was 0.13 and for the fixed effects component was 0.02, and the residual standard deviation for the model was 1.06.\nThere is somewhat more of a hint of a pattern when examining the parameter estimates from the *Eucalyptus* analysis.\nIn the case of the fixed effect of categorical reviewer ratings, analyses that were reviewed as 'publishable as is' and 'publishable with major revisions' appeared to produce results more different from the meta-analytic mean than those that were in the reference class of 'deeply flawed and unpublishable'.\nHowever, the estimates are very uncertain (*Eucalyptus* fixed effect for 'publishable as is' 0.76 (95% CI -0.29,1.81), and for 'publishable with major revision' 0.65 (95% CI -0.06,1.35)).\nFurther, the collinearity between the categorical and continuous ratings makes interpretation of effects involving either of these two 
variables unclear, and so we recommend against interpreting the pattern observed here.\nWe report this analysis only for the sake of transparency.\n\n\n\n\n::: {#tbl-multivariate-models-mod-summary .cell tbl-cap='Model summary metrics for multivariate models. $\\sigma$ is the residual standard deviation, ICC is the intra-class correlation coefficient, and ${R}_{M}^2$ and ${R}_{C}^2$ are the marginal and conditional $R^2$, respectively.'}\n\n```{.r .cell-code}\nmultivar_performance_tidy %>% \n select(dataset, starts_with(\"R2_\"), ICC, RMSE, Sigma) %>% \n mutate(dataset = \n case_when(str_detect(dataset, \"eucalyptus\") ~ \"Eucalyptus\",\n TRUE ~ dataset)) %>% \n gt::gt() %>% \n gt::fmt(columns = function(x) rlang::is_bare_numeric(x),\n fns = function(x) round(x, 2)) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::cols_label(R2_conditional = gt::md(\"$$R^{2}_\\\\text{Conditional}$$\"),\n R2_marginal = gt::md(\"$$R^{2}_\\\\text{Marginal}$$\"),\n Sigma = gt::md(\"$$\\\\sigma$$\"),\n dataset = \"Dataset\") %>% \n gt::tab_style(locations = \n cells_body(rows = str_detect(dataset, \"Eucalyptus\"),\n columns = dataset),\n style = cell_text(style = \"italic\")) %>% \n gt::fmt_number(columns = c(R2_conditional, R2_marginal, ICC, Sigma), \n decimals = 2, \n drop_trailing_zeros = TRUE, \n drop_trailing_dec_mark = TRUE)\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n
Dataset

$$R^{2}_\\text{Conditional}$$

\n

$$R^{2}_\\text{Marginal}$$

\n
ICCRMSE

$$\\sigma$$

\n
blue tit0.110.010.10.480.5
Eucalyptus0.130.020.111.021.06
\n
\n```\n\n:::\n:::\n\n\n\n\n### Out of sample predictions $y_i$\n\nFor the blue tit analyses, the only models that did converge, which were not singular and that had estimable random effect variances were the $y_{50}$ and $y_{75}$ prediction scenarios with Reviewer ID as the model random effect (@tbl-yi-multivar-singularity-convergence). Of the different random effects structures we trialled for the *Eucalyptus* analyses, only the model that included Reviewer ID as the random effect successfully fitted to the $y_{50}$ and $y_{75}$ prediction scenarios, with other models either failing to converge due to complete separation (`lme4::` error: `Downdated VtV is not positive definite`, see ).\n\n\n\n\n::: {#tbl-yi-multivar-singularity-convergence .cell tbl-cap='Singularity and convergence checks for all combinations of random effects specifications trialled for across subsets of out of sample predictions $y_i$ from multivariate models.' seed='1234'}\n\n```{.r .cell-code}\npossibly_parameters <- possibly(parameters::parameters, otherwise = NA)\n\nposs_extract_fit_engine <- possibly(extract_fit_engine, otherwise = NA)\n\n# ---- Create DF for combinatorial model specification ----\n\nmodel_formulas_multivar <- \n tidyr::expand_grid(outcome = \"box_cox_abs_deviation_score_estimate\",\n random_intercepts = list(\"study_id\", \n \"reviewer_id\",\n c(\"study_id\", \n \"reviewer_id\")),\n fixed_effects = list(c(\"publishable_as_is\", \n \"rate_analysis\", \n \"mean_diversity_index\", \n \"mixed_model\"),\n c(\"publishable_as_is\", \n \"rate_analysis\", \n \"mean_diversity_index\"))) %>% \n rowwise() %>% \n mutate(dataset = case_when(length(fixed_effects) == 4 ~ \"eucalyptus\", \n TRUE ~ \"blue tit\"),\n wflow_id = paste0(\"RE:\", \n paste0(random_intercepts, collapse = \"_\"))) %>% \n unite(wflow_id, dataset, wflow_id, remove = FALSE) %>% \n rowwise() %>% \n mutate(model_formulas = \n list(create_model_formulas(outcome,\n fixed_effects, \n random_intercepts)) %>% \n 
set_names(wflow_id),\n model_workflows = list(create_model_workflow(outcome, \n fixed_effects,\n random_intercepts)) %>% \n set_names(wflow_id))\n\nall_model_fits_multivar <- \n ManyEcoEvo_yi_results %>% \n select(dataset, estimate_type, effects_analysis) %>% \n group_by(dataset, estimate_type) %>% \n nest_join(model_formulas_multivar %>% \n select(dataset, \n model_workflows, \n fixed_effects, \n random_intercepts), \n by = join_by(dataset), \n name = \"model_workflow_sets\") %>% \n unnest(model_workflow_sets) %>% \n rowwise() %>% \n mutate(effects_analysis = \n list(effects_analysis %>% \n select(study_id, \n starts_with(\"box_cox_abs_dev\"), \n RateAnalysis, \n PublishableAsIs,\n ReviewerId,\n box_cox_var,\n mean_diversity_index,\n mixed_model) %>% \n janitor::clean_names() %>% \n mutate_if(is.character, factor)),\n fitted_mod_workflow = list(poss_fit(model_workflows, \n effects_analysis)),\n fitted_model = list(poss_extract_fit_engine(fitted_mod_workflow)),\n convergence = list(if (!is.na(fitted_model)) \n possibly_check_convergence(fitted_model)),\n singularity = list(if (!is.na(fitted_model)) \n possibly_check_singularity(fitted_model)),\n params = list(if (!is.na(fitted_model)) \n possibly_parameters(fitted_model)),\n fixed_effects = paste0(fixed_effects, collapse = \", \")\n ) %>% \n unnest_wider(random_intercepts, names_sep = \"_\") %>% \n unnest(c(convergence, singularity)) %>% \n rowwise() %>% \n replace_na(list(convergence = FALSE)) %>% \n select(-model_workflows, -fitted_mod_workflow, -effects_analysis)\n\nyi_multivar_singularity_convergence <- \n all_model_fits_multivar %>% \n left_join({all_model_fits_multivar %>% \n unnest(params) %>% \n filter(Effects == \"random\") %>% \n filter(is.na(SE) | is.infinite(SE)) %>% \n distinct(fixed_effects, \n random_intercepts_1,\n random_intercepts_2, \n dataset, \n estimate_type) %>% \n mutate(SE_calc = FALSE)}, \n by = join_by(dataset, \n estimate_type, \n random_intercepts_1,\n random_intercepts_2, \n 
fixed_effects)) %>% \n left_join({all_model_fits_multivar %>% \n unnest(params) %>% \n filter(Effects == \"random\") %>% \n filter(if_any(contains(\"CI\"), \n list(is.infinite, is.na))) %>% \n distinct(fixed_effects, \n random_intercepts_1,\n random_intercepts_2, \n dataset, \n estimate_type) %>% \n mutate(CI_calc = FALSE)},\n by = join_by(dataset, \n estimate_type, \n random_intercepts_1,\n random_intercepts_2, \n fixed_effects)) %>% \n rowwise() %>%\n mutate(across(c(SE_calc, CI_calc), ~ ifelse(is.na(.x), TRUE, .x)),\n across(c(SE_calc, CI_calc, singularity), \n ~ ifelse(is_false(convergence), NA, .x)))\n\n# If singularity == FALSE and convergence == TRUE, \n# but the model appears here, then that's because\n# the SD and CI's couldn't be estimated by parameters::\n\nyi_multivar_singularity_convergence %>% \n select(-fixed_effects, -fitted_model, -params) %>% \n arrange(random_intercepts_1,\n random_intercepts_2, \n dataset,\n estimate_type) %>% \n mutate(across(starts_with(\"random\"), \n ~ str_replace_all(.x, \"_\", \" \") %>%\n Hmisc::capitalize() %>% \n str_replace(\"id\", \"ID\")),\n dataset = str_replace(dataset, \"eucalyptus\", \"*Eucalyptus*\")) %>% \n group_by(dataset) %>% \n gt::gt(rowname_col = \"estimate_type\") %>% \n tab_style(\n style = list(\n cell_fill(color = scales::alpha(\"red\", 0.6)),\n cell_text(color = \"white\", weight = \"bold\")\n ),\n locations = list(\n cells_body(columns = \"singularity\", rows = singularity == TRUE),\n cells_body(columns = \"convergence\", rows = convergence == FALSE), #TODO why didn't work here??\n cells_body(columns = \"SE_calc\", rows = SE_calc == FALSE),\n cells_body(columns = \"CI_calc\", rows = CI_calc == FALSE)\n )\n ) %>% \n gt::text_transform(fn = function(x) ifelse(x == TRUE, \"yes\",\n ifelse(x == FALSE, \"no\", x)),\n locations = cells_body(columns = c(\"singularity\", \n \"convergence\", \n \"SE_calc\",\n \"CI_calc\"))) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::cols_label(dataset = 
\"Dataset\",\n singularity = \"Singular Fit?\",\n convergence = \"Model converged?\",\n SE_calc = gt::md(\"Can random effects $\\\\text{SE}$ be calculated?\"),\n CI_calc = \"Can random effect CI be calculated?\"\n ) %>% \n gt::tab_spanner(label = \"Random Effects\",\n columns = gt::starts_with(\"random\")) %>% \n gt::sub_missing() %>% \n gt::cols_label_with(columns = gt::starts_with(\"random\"),\n fn = function(x) paste0(\"\")) %>% \n gt::text_transform(fn = function(x) map(x, gt::md), \n locations = cells_row_groups()) %>% \n gt_fmt_yi(columns = \"estimate_type\")\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n \n
\n Random Effects\n Model converged?Singular Fit?

Can random effects \\(\\text{SE}\\) be calculated?

\n
Can random effect CI be calculated?

blue tit

\n
$$y_{25}$$Reviewer IDyesyesnono
$$y_{50}$$Reviewer IDyesnoyesyes
$$y_{75}$$Reviewer IDyesnoyesyes
$$y_{25}$$Study IDReviewer IDyesyesnono
$$y_{50}$$Study IDReviewer IDyesnonono
$$y_{75}$$Study IDReviewer IDyesyesnono
$$y_{25}$$Study IDyesnonono
$$y_{50}$$Study IDyesnonono

Eucalyptus

\n
$$y_{25}$$Reviewer IDyesyesnono
$$y_{50}$$Reviewer IDyesnoyesyes
$$y_{75}$$Reviewer IDyesnoyesyes
$$y_{25}$$Study IDReviewer IDyesnonono
$$y_{50}$$Study IDReviewer IDyesyesnono
$$y_{25}$$Study IDyesnonono
$$y_{50}$$Study IDyesnonono
\n
\n```\n\n:::\n:::\n\n\n\n\nConsequently, we deviated from our intended plan of using random effects for both Effect ID and Reviewer ID, instead using a single random effect for Reviewer ID for the $y_{50}$ and $y_{75}$ prediction scenarios for both blue tit and *Eucalyptus* datasets (@tbl-BT-yi-multivar-summary, @tbl-BT-yi-multivar-params).\n\n\n\n\n::: {#tbl-BT-yi-multivar-params .cell .column-page tbl-cap='Parameter estimates for converging, non-singular multivariate models fitted to blue tit out-of-sample-prediction estimates $y_i$.'}\n\n```{.r .cell-code}\nyi_multivar_singularity_convergence %>% \n filter(SE_calc == TRUE) %>% \n filter(random_intercepts_1 != \"study_id\" | dataset != \"blue tit\") %>% #rm eliminated modl\n select(dataset, estimate_type, params) %>% \n unnest(params) %>% \n relocate(c(Effects, Group), .after = Parameter) %>% \n gt::gt(rowname_col = \"estimate_type\", groupname_col = \"dataset\") %>% \n gt::fmt_number(columns = c(-dataset, -estimate_type),\n decimals = 2, \n drop_trailing_zeros = TRUE, \n drop_trailing_dec_mark = TRUE\n ) %>% \n gt::text_transform(fn = function(x) str_replace(x, \"publishable_as_is\", \"Categorical Peer Rating\") %>% \n str_replace(., \"rate_analysis\", \"Continuous Peer Rating\") %>% \n str_replace(., \"mean_diversity_index\", \"Sorensen's Index\") %>% \n str_replace(., \"mixed_model\", \"Random Included\"),\n locations = cells_body(columns = c(\"Parameter\"))) %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::sub_missing(missing_text = \"\") %>% \n gt::fmt(columns = \"p\",\n fns = function(x) gtsummary::style_pvalue(x)) %>% \n gt::text_transform(\n locations = cells_stub(\n rows = Parameter != \"(Intercept)\"\n ),\n fn = function(x){\n paste0(\"\")\n }\n ) %>% \n gt::text_transform(locations = \n cells_body(columns = Group, \n rows = Group %in% c(\"reviewer_id\", \"study_id\")),\n fn = function(x){\n str_replace(x, \"_\", \" \") %>% \n Hmisc::capitalize() %>% \n str_replace(\"id\", \"ID\")\n }) %>% 
\n gt::cols_label(CI_low = gt::md(\"95\\\\%CI\"), df_error = \"df\", p = gt::md(\"*p*\"), SE = gt::md(\"$\\\\text{SE}$\")) %>% \n gt::tab_style(style = cell_text(style = \"italic\", transform = \"capitalize\"), \n locations = cells_row_groups(groups = \"eucalyptus\")) %>% \n gt_fmt_yi(columns = \"estimate_type\") %>% \n fmt_number(columns = c(gt::contains(\"CI\"), \"SE\", \"t\"),\n drop_trailing_zeros = TRUE, \n drop_trailing_dec_mark = TRUE,\n decimals = 2) %>%\n gt::fmt_scientific(columns = c(\"Coefficient\"), \n rows = abs(Coefficient) < 0.01 | abs(Coefficient) > 1000,\n decimals = 2) %>% \n gt::fmt_scientific(columns = c(\"SE\"),\n rows = abs(SE) < 0.01 | abs(SE) > 1000,\n decimals = 2) %>% \n gt::fmt_scientific(columns = t,\n rows = abs(t) < 0.01,\n decimals = 2) %>% \n gt::fmt_scientific(columns = CI_low, \n rows = abs(CI_low) < 0.01 | abs(CI_low) > 1000) %>% \n gt::fmt_scientific(columns = CI_high, \n rows = abs(CI_high) < 0.01 | abs(CI_high) > 1000,\n decimals = 2) %>% \n gt::cols_hide(Effects) %>% \n gt::cols_merge(columns = starts_with(\"CI_\"), \n pattern = \"[{1},{2}]\") %>% \n gt::cols_hide(\"CI\") %>% \n gt::cols_label(Group = \"Random Effect\")\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n
ParameterRandom EffectCoefficient

\\(\\text{SE}\\)

\n

95%CI

\n
tdf

p

\n
blue tit
$$y_{50}$$(Intercept)−0.420.49[−1.39,0.56]−0.842160.4
Categorical Peer Ratingpublishable as is−0.590.43[−1.44,0.26]−1.372160.2
Categorical Peer Ratingpublishable with major revision−0.270.33[−0.92,0.38]−0.812160.4
Categorical Peer Ratingpublishable with minor revision−0.40.38[−1.16,0.35]−1.052160.3
Continuous Peer Rating3.62 × 10−34.94 × 10−3[−6.13 × 10−3,0.01]0.732160.5
Sorensen's Index−1.690.66[−2.98,−0.39]−2.572160.011
SD (Intercept)Reviewer ID0.20.09[0.08,0.49]


SD (Observations)Residual0.710.04[0.64,0.79]


$$y_{75}$$(Intercept)−1.570.48[−2.51,−0.63]−3.32290.001
Categorical Peer Ratingpublishable as is0.410.41[−0.4,1.22]1.012290.3
Categorical Peer Ratingpublishable with major revision0.10.31[−0.51,0.71]0.322290.7
Categorical Peer Ratingpublishable with minor revision0.370.37[−0.36,1.1]12290.3
Continuous Peer Rating−4.10 × 10−44.70 × 10−3[−9.66 × 10−3,8.84 × 10−3]−0.09229>0.9
Sorensen's Index0.130.63[−1.12,1.38]0.212290.8
SD (Intercept)Reviewer ID0.290.07[0.18,0.45]


SD (Observations)Residual0.640.03[0.57,0.71]


eucalyptus
$$y_{50}$$(Intercept)−2.081.21[−4.47,0.31]−1.721620.087
Categorical Peer Ratingpublishable as is−0.050.86[−1.75,1.66]−0.06162>0.9
Categorical Peer Ratingpublishable with major revision0.120.58[−1.03,1.26]0.21620.8
Categorical Peer Ratingpublishable with minor revision0.160.67[−1.16,1.48]0.241620.8
Continuous Peer Rating4.04 × 10−40.01[−0.02,0.02]0.04162>0.9
Sorensen's Index1.991.58[−1.13,5.1]1.261620.2
Random Included0.310.26[−0.21,0.83]1.191620.2
SD (Intercept)Reviewer ID0.090.61[1.68 × 10−7,4.89 × 104]


SD (Observations)Residual1.30.08[1.15,1.46]


$$y_{75}$$(Intercept)−0.541.01[−2.53,1.44]−0.541610.6
Categorical Peer Ratingpublishable as is0.190.81[−1.42,1.79]0.231610.8
Categorical Peer Ratingpublishable with major revision−0.010.55[−1.11,1.08]−0.02161>0.9
Categorical Peer Ratingpublishable with minor revision0.170.63[−1.08,1.41]0.261610.8
Continuous Peer Rating−1.91 × 10−39.63 × 10−3[−0.02,0.02]−0.21610.8
Sorensen's Index0.311.31[−2.27,2.9]0.241610.8
Random Included−0.110.25[−0.6,0.38]−0.461610.6
SD (Intercept)Reviewer ID0.041.3[1.69 × 10−30,9.00 × 1026]


SD (Observations)Residual1.260.08[1.11,1.42]


\n
\n```\n\n:::\n:::\n\n::: {#tbl-BT-yi-multivar-summary .cell tbl-cap='Model summary statistics for non-singular, converging multivariate models fit to out-of-sample estimates $y_i$.'}\n\n```{.r .cell-code}\nManyEcoEvo_yi_viz %>% \n filter(model_name == \"MA_mod_mv\") %>% \n rowwise() %>% \n mutate(converged = \n possibly_check_convergence(model), \n singularity = possibly_check_singularity(model)) %>% \n select(dataset, estimate_type, mod_fit_stats, mod_glance) %>% \n hoist(mod_fit_stats, \"RMSE\", \"Sigma\", \"R2_conditional\", \"R2_marginal\", \"ICC\") %>% \n hoist(mod_glance, \"nobs\") %>% \n select(-mod_glance, -mod_fit_stats) %>% \n semi_join({ManyEcoEvo_yi_viz %>% \n filter(model_name == \"MA_mod_mv\") %>% \n rowwise() %>% \n transmute(dataset, \n estimate_type, \n converged = possibly_check_convergence(model), \n singularity = possibly_check_singularity(model)) %>% \n filter(converged, !singularity)},\n by = join_by(dataset, estimate_type)) %>% \n relocate(nobs, .after = \"ICC\") %>% \n gt::gt(groupname_col = \"dataset\", rowname_col = \"estimate_type\") %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::cols_label(estimate_type = \"Prediction Scenario\", \n R2_conditional = gt::md(\"$$R^{2}_\\\\text{Conditional}$$\"),\n R2_marginal = gt::md(\"$$R^{2}_\\\\text{Marginal}$$\"),\n Sigma = gt::md(\"$$\\\\sigma$$\"),\n dataset = \"Dataset\",\n nobs = gt::md(\"$N_{Obs}$\")) %>% \n gt::tab_style(locations = cells_body(rows = str_detect(dataset, \"Eucalyptus\"),\n columns = dataset),\n style = cell_text(style = \"italic\")) %>% \n gt::cols_hide(dataset) %>% \n gt_fmt_yi(columns = \"estimate_type\") %>% \n gt::fmt_number(columns = c(gt::starts_with(\"R2\"), \"ICC\", \"Sigma\", \"RMSE\"),\n drop_trailing_zeros = TRUE,\n drop_trailing_dec_mark = TRUE,\n decimals = 2) %>%\n gt::fmt_scientific(columns = c(\"RMSE\"),\n rows = abs(RMSE) < 0.01 | abs(RMSE) > 1000,\n decimals = 2) %>%\n gt::fmt_scientific(columns = c(\"Sigma\"),\n rows = abs(Sigma) < 0.01 | 
abs(Sigma) > 1000,\n decimals = 2) %>%\n gt::tab_style(style = cell_text(style = \"italic\", transform = \"capitalize\"), \n locations = cells_row_groups(groups = \"eucalyptus\"))\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n \n\n\n\n\n\n\n \n \n \n
RMSE

$$\\sigma$$

\n

$$R^{2}_\\text{Conditional}$$

\n

$$R^{2}_\\text{Marginal}$$

\n
ICC

\\(N_{Obs}\\)

\n
blue tit
$$y_{50}$$0.680.710.110.050.07224
$$y_{75}$$0.590.640.20.030.17237
eucalyptus
$$y_{50}$$1.271.30.020.020171
$$y_{75}$$1.231.260.010.010170
\n
\n```\n\n:::\n:::\n\n\n\n\n\n## Model Summary Metrics for out-of-sample predictions $y_i$ {#sec-yi-summary}\n\n\n\n\n\n\n::: {#tbl-yi-deviation-parameter-estimates .cell .column-page tbl-cap='Model summary metrics for models of Box-Cox transformed deviation from the mean $y_i$ estimate as a function of categorical peer-review rating, continuous peer-review rating, and Sorensen\\'s index for blue tit and *Eucalyptus* analyses, and also for the inclusion of random effects for *Eucalyptus* analyses. Coefficient of determination, $R^2$, is reported for models of deviation as a function of Sorensen diversity scores and presence of random effects, while $R^{2}_\\text{Conditional}$, $R^{2}_\\text{Marginal}$ and the intra-class correlation (ICC) are reported for models of deviation as explained by peer-review ratings. For all models the residual standard deviation $\\sigma$, root mean squared error (RMSE) were calculated. The number of observations ($N_{\\text{Obs.}}$) is displayed for reference.'}\n\n```{.r .cell-code}\ntbl_data_yi_deviation_model_params %>% \n gt::gt(rowname_col = \"dataset\") %>% \n gt::opt_stylize(style = 6, color = \"gray\") %>% \n gt::sub_missing(missing_text = \"\") %>% \n gt::cols_label(dataset = \"Dataset\",\n R2 = gt::md(\"$$R^2$$\"),\n R2_conditional = gt::md(\"$$R^{2}_\\\\text{Conditional}$$\"),\n R2_marginal = gt::md(\"$$R^{2}_\\\\text{Marginal}$$\"),\n Sigma = gt::md(\"$$\\\\sigma$$\"),\n nobs = gt::md(\"$$N_{\\\\text{Obs.}}$$\"),\n estimate_type = \"Prediction Scenario\") %>% \n gt::tab_style(locations = cells_body(rows = str_detect(dataset, \"Eucalyptus\"),\n columns = dataset),\n style = cell_text(style = \"italic\")) %>% \n gt::text_transform(\n locations = cells_stub(\n rows = estimate_type != \"y25\"\n ),\n fn = function(x){\n paste0(\"\")\n }\n ) %>% \n gt::tab_style(locations = gt::cells_stub(rows = str_detect(dataset, \"Eucalyptus\")),\n style = cell_text(style = \"italic\")) %>% \n gt::fmt_number(columns = gt::contains(c(\"ICC\", 
\"RMSE\")), \n drop_trailing_dec_mark = TRUE,\n drop_trailing_zeros = T, \n decimals = 2) %>% \n gt::fmt_scientific(columns = c(\"RMSE\"),\n rows = abs(RMSE) < 0.01 | abs(RMSE) > 1000,\n drop_trailing_dec_mark = TRUE,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_scientific(columns = c(gt::contains((\"R2_marginal\"))), \n rows = str_detect(model_name, \"continuous|categorical\"),\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_number(columns = gt::contains(c(\"R2_conditional\")), \n drop_trailing_dec_mark = TRUE,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_number(columns = gt::contains(c(\"Sigma\")), \n drop_trailing_dec_mark = TRUE,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_scientific(columns = c(\"Sigma\"),\n rows = abs(Sigma) < 0.01 | abs(Sigma) > 1000,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_number(columns = \"R2\",\n decimals = 2) %>% \n gt::fmt_scientific(columns = c(\"R2\"),\n rows = abs(R2) < 0.01 | abs(R2) > 1000,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_scientific(columns = c(\"R2_conditional\"),\n rows = abs(R2_conditional) < 0.01 | abs(R2_conditional) > 1000,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_scientific(columns = c(\"R2_marginal\"),\n rows = abs(R2_marginal) < 0.01 | abs(R2_marginal) > 1000,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_scientific(columns = c(\"ICC\"),\n rows = abs(ICC) < 0.01 | abs(ICC) > 1000,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt_fmt_yi(\"estimate_type\")\n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n \n \n \n
Prediction Scenario

$$N_{\\text{Obs.}}$$

\n
RMSE

$$\\sigma$$

\n

$$R^2$$

\n

$$R^{2}_\\text{Conditional}$$

\n

$$R^{2}_\\text{Marginal}$$

\n
ICC
Deviation explained by Sorensen's index
blue tit$$y_{25}$$630.580.597.76 × 10−4


$$y_{50}$$600.720.730.03


$$y_{75}$$630.710.735.99 × 10−3


Eucalyptus$$y_{25}$$380.910.941.13 × 10−3


$$y_{50}$$381.291.334.82 × 10−3


$$y_{75}$$381.261.294.32 × 10−4


Deviation explained by continuous ratings
blue tit$$y_{25}$$2370.580.58

2.48 × 10−4
$$y_{50}$$2240.680.71
0.11.74 × 10−30.1
$$y_{75}$$2370.590.63
0.21.86 × 10−20.18
Eucalyptus$$y_{25}$$1740.920.93
0.041.79 × 10−20.02
$$y_{50}$$1711.281.29

5.75 × 10−4
$$y_{75}$$1701.231.24

1.44 × 10−4
Deviation explained by categorical ratings
blue tit$$y_{25}$$2370.580.58

4.35 × 10−3
$$y_{50}$$2240.680.71
0.11.3 × 10−20.09
$$y_{75}$$2370.590.63
0.23.36 × 10−20.17
Eucalyptus$$y_{25}$$1740.890.92
0.071.1 × 10−20.06
$$y_{50}$$1711.271.29
0.013.39 × 10−39.14 × 10−3
$$y_{75}$$1701.231.25

1.95 × 10−3
Deviation explained by inclusion of random effects
Eucalyptus$$y_{25}$$380.850.870.14


$$y_{50}$$381.291.333.91 × 10−3


$$y_{75}$$381.261.294.76 × 10−3


\n
\n```\n\n:::\n:::\n\n\n\n\n## Post-hoc analysis: checking the use of model weights in all models explaining deviation from the meta-analytic mean {#sec-post-hoc-weights-analysis}\n\nAs we describe in @nte-box-weight-deviation, for models of deviation from the meta-analytic mean effect-size, we had intended to use the inverse variance of the Box-Cox transformed deviation from the meta-analytic mean as model weights. Unfortunately using our intended weights specification resulted in invalid transformed response variables for some models whereby extreme outliers were weighted more heavily (two orders of magnitude) than other effect sizes, which caused both issues in the estimated model parameters as well as convergence issues, in particular for models analysing the effect of categorical peer-review rating on deviation from the meta-analytic mean. \n\n::: {.callout-note appearance=\"simple\"}\n# Model Weight Calculation Details\n\nWe intended to use the invariance of the Box-Cox transformed deviation scores as model weights in our models of deviation from the meta-analytic mean. The variance of the Box-Cox transformation scores is calculated using the delta method ([@eq-folded-variance]). 
\n\n$$\n\\begin{aligned}\n\\mu_{\\text{folded}} &= \\sigma \\sqrt{\\frac{2}{\\pi}} \\exp\\left(-\\frac{\\mu^2}{2 \\sigma^2}\\right) + \\mu \\left(1 - 2 \\times \\boldsymbol{\\phi}\\left(-\\frac{\\mu}{\\sigma}; 0,1\\right)\\right) \\\\\n\\text{SE}_\\text{folded} &= \\sqrt{\\mu^2 + \\sigma^2 - \\mu_{\\text{folded}}^2} \\\\\n\\text{VAR}_\\text{folded} &= \\text{SE}_\\text{folded}^2 \\\\\n\\text{VAR}_\\text{Box-Cox} &= \\text{VAR}_\\text{folded} \\times \\left(\\lambda \\mu_{\\text{folded}}^{\\lambda - 1}\\right)^2\n\\end{aligned}\n$$ {#eq-folded-variance}\n\nWhere:\n\n- $\\mu_{\\text{folded}}$ is the folded mean of the deviation scores from the mean for effect $i$,\n- $\\text{VAR}_\\text{folded}$ is the folded variance of the deviation scores from the mean for effect $i$,\n- $\\mu$ is the deviation score from the mean for effect $i$, calculated as the difference between effect size $i$ and the mean for all effects: $\\bar{Z}_r - Z_{r_i}$,\n- $\\sigma$ is the square root of the variance of effect $i$, $\\text{VZ}_r$,\n- $\\lambda$ is the Box-Cox transformation parameter (@fig-box-cox-transformations), and\n- $\\boldsymbol{\\phi}$ is the standard normal cumulative distribution function.\n\nWhich is executed in the \\function{variance_box_cox} function from the the \\package{ManyEcoEvo} package, illustrated in the following code snippet:\n\n\n\n\n::: {.cell code-caption='Function to calculate the variance of the Box-Cox transformed deviation scores.' 
filename='box_cox_transform.R'}\n\n```{.r .cell-code .code-overflow-wrap}\nvariance_box_cox <- function(folded_mu, folded_v, lambda){\n variance_bc <- folded_v * (lambda * folded_mu^(lambda - 1))^2 # delta method\n return(variance_bc)\n}\n\nfolded_params <- function(abs_dev_score, VZr){\n mu <- abs_dev_score\n sigma <- sqrt(VZr)\n fold_mu <- sigma * sqrt(2/pi) * exp((-mu^2)/(2 * sigma^2)) + \n mu * (1 - 2 * pnorm(-mu/sigma)) # folded abs_dev_score\n fold_se <- sqrt(mu^2 + sigma^2 - fold_mu^2)\n fold_v <- fold_se^2 # folded VZr\n return(list(fold_mu = fold_mu, fold_v = fold_v))\n}\n```\n:::\n\n\n\n:::\n\nWe systematically investigated the impact of using different weighting schemes (no weights, inverse-Box-Cox transformed variance-, and inverse folded variance-, of the absolute deviation scores) on model convergence, singularity and other model checking metrics to aid decision-making about the appropriate weighting scheme and random-effects structure for these models. Given the convergence issues we encountered when using the intended weights, and that the desired random effects structure could not be fitted, we also investigated the impact of using different random-effects structures (effect ID, reviewer ID, or both effect ID and reviewer ID) on model convergence and singularity. For each weighting scheme, we fitted models with each different random effects structure to both the blue tit and *Eucalyptus* analyst data, and evaluated model convergence, singularity and model performance using the `performance::compare_performance` function. 
\n\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code .code-overflow-wrap code-fold=\"true\" code-summary=\"Weight investigation code\"}\n# Create filter argument expressions\nfilter_args = rlang::exprs(exclusion_set == \"complete\", \n publishable_subset == \"All\",\n expertise_subset == \"All\",\n collinearity_subset == \"All\")\n\n# Create function to prepare ratings data\nprepare_ratings_data <- function(effects_analysis){\n data_tbl <-\n effects_analysis %>% \n unnest(cols = c(review_data)) %>% \n select(study_id, \n TeamIdentifier,\n starts_with(\"box_cox_\"),\n ReviewerId, \n PublishableAsIs,\n # lambda,\n folded_v_val) %>% \n ungroup() %>% \n mutate(PublishableAsIs = \n forcats::fct_relevel(PublishableAsIs, \n c(\"deeply flawed and unpublishable\", \n \"publishable with major revision\", \n \"publishable with minor revision\", \n \"publishable as is\")),\n obs_id = 1:n()) \n return(data_tbl)\n}\n\n# Create base model formulat\nbase_formula <- rlang::new_formula(\n rlang::expr(box_cox_abs_deviation_score_estimate), \n rlang::expr(PublishableAsIs))\n# Create weight functions\ncalc_inv_bc_var <- rlang::as_function(~ 1/pull(.x, box_cox_var))\ncalc_inv_folded_v <- rlang::as_function(~ 1/pull(.x, folded_v_val))\nno_weights <- NA\n\nweight_formulas <- list(no_weights,\n calc_inv_bc_var,\n calc_inv_folded_v\n) %>% \n purrr::set_names(\"no_weights\",\n \"inv_bc_var\",\n \"inv_folded_v\")\n\n# Create random effect expressions\nRE_rev <- expr((1 | ReviewerId))\nRE_study <- expr((1 | study_id))\nRE_both <- expr(!!RE_rev + !!RE_study)\n\nrandom_expressions <- list(\n RE_rev,\n RE_study,\n RE_both\n) %>% purrr::set_names(\"RE_rev\",\n \"RE_study\",\n \"RE_both\")\n# Create model fitting wrapper function\nlmer_wrap <- function(data_tbl, \n random_effect, \n weight_form, \n ..., \n env = caller_env()){\n f <- rlang::new_formula(expr(box_cox_abs_deviation_score_estimate), \n expr(PublishableAsIs + !!random_effect), \n env = env)\n \n weights <- if ( rlang::is_na(weight_form) ) NULL 
else weight_form(data_tbl)\n \n rlang::inject(lme4::lmer(!!f,\n data = data_tbl,\n weights = weights, \n ...))\n}\n\n# Fit all models\nall_models <-\n ManyEcoEvo_results %>% \n ungroup %>% \n filter(!!!filter_args) %>% \n select(dataset, effects_analysis) %>%\n hoist(.col = effects_analysis,\n \"lambda\",\n .simplify = TRUE,\n .transform = unique) %>% \n mutate(model_data = map(effects_analysis, \n prepare_ratings_data), \n .keep = \"unused\") %>% \n expand_grid(\n expand_grid(weight_formulas, random_expressions) %>% \n mutate(weight_forms = names(weight_formulas),\n random_effect = names(random_expressions)) %>% \n unite(\"model_spec\", weight_forms, random_effect, sep = \".\") \n ) %>% \n # hoist(\"model_data\", weights = list(\"study_id\", \"box_cox_var\", \"folded_v_val\"),.remove = F) %>% \n mutate(model = pmap(list(model_data, \n random_expressions, \n weight_formulas), \n lmer_wrap), \n .keep = \"unused\") %>% \n mutate(singularity = map_lgl(model, \n performance::check_singularity),\n convergence = map_lgl(model, \n performance::check_convergence))\n\npossibly_estimate_means <- possibly(modelbased::estimate_means, otherwise = NULL)\n\n# Extract Parameter Estimates\nestimate_means <- \n all_models %>% \n filter(singularity == F, convergence == T) %>%\n reframe(model = set_names(model, model_spec), \n dataset = dataset, \n model_spec = model_spec,\n weights = case_when(!str_detect(model_spec, \"no_weights\") ~ \"(weights)\",\n .default = NA)) %>% \n rowwise() %>% \n mutate(weights = modify_if(list(weights), ~ is.na(.x), ~ NULL),\n results = list(possibly_estimate_means(model,\n by = \"PublishableAsIs\", weights = weights))) %>% \n ungroup() %>% \n mutate(results = set_names(results, dataset)) %>% \n drop_na(results) # model means couldn't be estimated due to convergence issues, drop those models\n\n# evaluate and compare performance for remaining models\nmodel_comparison_results <-\n all_models %>% \n filter(model_spec != \"inv_bc_var.RE_study\" | dataset != 
\"eucalyptus\") %>% #rm nearly unidentifiable model\n semi_join(estimate_means, \n by = join_by(dataset, model_spec)) %>% \n group_by(dataset) %>% \n summarise(model = set_names(model, model_spec) %>% list, \n results = map(model, \n performance::compare_performance, \n rank = T), \n results = set_names(results, \n unique(dataset)), \n .groups = \"keep\")\n\nmodel_means_results <- \n estimate_means %>% \n left_join(model_comparison_results %>% \n select(-model) %>% unnest(results), \n by = join_by(\"dataset\", \"model_spec\" == \"Name\")) %>%\n mutate(label = paste(dataset, model_spec, sep = \".\")) %>% \n arrange(dataset, desc(Performance_Score)) %>% \n select(Performance_Score, dataset, model_spec, results) %>% \n mutate(label = paste(dataset, model_spec, sep = \".\")) \n```\n:::\n\n\n\n\n\n### Model Weight Investigation Findings\n\nFor the blue tit models of deviation influenced by categorical peer-review rating, no models including study ID as a random-effect were able to be properly fitted, across all model weight specifications [@tbl-weights-analysis-fit-checks]. For *Eucalyptus* models with either no weights, or inverse folded variance as weights, only models with Reviewer ID as the random-effect fitted properly. While models with either Reviewer ID or Study ID as the random-effect passed singularity and convergence fit-checks, and had estimable parameter means when the Inverse Box-Cox variance was used as a model weight [@tbl-weights-analysis-fit-checks].\n\n\n\n\n::: {#tbl-weights-analysis-fit-checks .cell tbl-cap='Singularity and convergence checks for all combinations of model weights and random-effects structure in models of the effect of categorical peer rating on deviation from the analytic mean. For some models, mean estimates of parameter levels for peer-review rating were not able to be estimated. 
'}\n\n```{.r .cell-code}\nall_models %>% \n select(dataset, model_spec, singularity, convergence) %>% \n left_join(estimate_means %>% select(-model, -results, -weights) %>% \n mutate(estimate_means = T)) %>% \n separate(model_spec, into = c(\"model_spec_weight\", \n \"model_spec_random_effect\"), sep = \"\\\\.\") %>%\n replace_na(replace = list(estimate_means = FALSE)) %>% \n group_by(dataset) %>% \n gt::gt(rowname_col = \"model_spec_weight\") %>% #\n gt::opt_stylize(style = 6, color = \"gray\") %>%\n gt::tab_stubhead(label = \"Model Weight\") %>%\n gt::tab_style(\n style = list(\n cell_fill(color = scales::alpha(\"red\", 0.6)),\n cell_text(color = \"white\", weight = \"bold\")\n ),\n locations = list(\n cells_body(columns = \"singularity\", rows = singularity == TRUE),\n cells_body(columns = \"convergence\", rows = convergence == FALSE),\n cells_body(columns = \"estimate_means\", rows = estimate_means == FALSE)\n )\n ) %>%\n gt::tab_style(style = cell_text(style = \"italic\", transform = \"capitalize\"), \n locations = cells_row_groups(groups = \"eucalyptus\")) %>% \n gt::cols_label(dataset = \"Dataset\",\n singularity = \"Singular Fit?\",\n convergence = \"Model converged?\",\n model_spec_random_effect = \"Random Effects\",\n estimate_means = \"Means Estimable?\") %>% \n gt::text_transform(fn = function(x) ifelse(x == TRUE, \"yes\",\n ifelse(x == FALSE, \"no\", x)),\n locations = cells_body(columns = c(\"singularity\", \n \"convergence\", \n \"estimate_means\"))) %>%\n gt::text_transform(\n locations = cells_stub(\n rows = model_spec_random_effect != \"RE_rev\"\n ),\n fn = function(x){\n paste0(\"\")\n }\n ) %>% \n gt::text_transform(\n locations = cells_body(columns = \"model_spec_random_effect\"),\n fn = function(x){\n str_replace(x, \"RE_rev\", \"Reviewer ID\") %>% \n str_replace(\"RE_study\", \"Study ID\") %>% \n str_replace(\"RE_both\", \"Reviewer ID and Study ID\")\n }\n ) %>% \n gt::text_transform(\n locations = cells_stub(),\n fn = function(x){\n 
str_replace(x, \"no_weights\", \"None\") %>% \n str_replace(\"inv_bc_var\", \"Inverse Box-Cox variance\") %>% \n str_replace(\"inv_folded_v\", \"Inverse folded variance\")\n }\n ) %>% \n gt::tab_style(style = cell_text(style = \"italic\", transform = \"capitalize\"), \n locations = cells_row_groups(groups = \"eucalyptus\")) \n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n \n \n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n\n\n\n\n \n \n \n
Model WeightRandom EffectsSingular Fit?Model converged?Means Estimable?
blue tit
NoneReviewer IDnoyesyes
Study IDnonono
Reviewer ID and Study IDyesyesno
Inverse Box-Cox varianceReviewer IDnoyesyes
Study IDnoyesno
Reviewer ID and Study IDyesyesno
Inverse folded varianceReviewer IDnoyesyes
Study IDnoyesno
Reviewer ID and Study IDyesyesno
eucalyptus
NoneReviewer IDnoyesyes
Study IDnonono
Reviewer ID and Study IDnoyesno
Inverse Box-Cox varianceReviewer IDnoyesyes
Study IDnoyesyes
Reviewer ID and Study IDyesyesno
Inverse folded varianceReviewer IDnoyesyes
Study IDnoyesno
Reviewer ID and Study IDyesyesno
\n
\n```\n\n:::\n:::\n\n\n\n\nTo check that the alternative weighting methods generated sensible parameter estimates, we generated marginal effects plots for all models that passed the convergence and singularity checks where marginal effects were estimable for both blue tit [@fig-effects-plots-BT] and *Eucalyptus* datasets [@fig-effects-plots-Euc]. \n\nUsing the inverse Box-Cox transformed variance for model weights resulted in the marginal mean being pulled towards zero for both datasets, however, this was quite extreme for the *Eucalyptus* dataset [@fig-effects-plots-Euc]*A*. When the random-effect for study ID is substituted in place of the reviewer ID, the model means seem more fitting to the data, but uncertainty of estimates seemed artificially small [@fig-effects-plots-Euc]*B*. For this model, marginal means were all equal in both mean estimate and their standard error when the inverse Box-Cox variance was used as a weight with Study ID as the random effect because the model was nearly unidentifiable [@tbl-marginal-means-weights-analysis], so we eliminated this specification from further consideration. 
\n\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nmodify_plot <- function(p, .y){\n p + \n labs(subtitle = as_label(.y), \n title = NULL, \n x = \"\", \n y = \"Box-Cox transformed\\nabsolute deviation from\\nmeta-analytic mean\") + \n see::theme_lucid() + \n theme(axis.text.x = element_text(angle = 50, hjust = 1))\n}\n\nmodel_means_results %>% \n filter(dataset == \"eucalyptus\") %>% \n arrange(model_spec) %>% \n pull(results, name = model_spec) %>% \n map(., plot, at = \"PublishableAsIs\") %>% \n map2(., names(.), modify_plot) %>% \n patchwork::wrap_plots() +\n patchwork::plot_annotation(tag_levels = 'A')\n```\n\n::: {.cell-output-display}\n![Effect plots for each non-singular model that converged with estimable fixed effect group means for the *Eucalyptus* dataset.](SM3_ExplainingDeviation_files/figure-html/fig-effects-plots-Euc-1.png){#fig-effects-plots-Euc fig-pos='page' width=960}\n:::\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nmodify_plot <- function(p, .y){\n p + \n labs(subtitle = as_label(.y), \n title = NULL, \n x = \"\", \n y = \"Box-Cox transformed\\nabsolute deviation from\\nmeta-analytic mean\") + \n see::theme_lucid() + \n theme(axis.text.x = element_text(angle = 50, hjust = 1))\n}\n\nmodel_means_results %>% \n filter(dataset == \"blue tit\") %>% \n pull(results, name = model_spec) %>% \n map(., plot, at = \"PublishableAsIs\") %>% \n map2(., names(.), modify_plot) %>% \n patchwork::wrap_plots() +\n patchwork::plot_annotation(tag_levels = 'A')\n```\n\n::: {.cell-output-display}\n![Effect plots for each non-singular model that converged with estimable fixed effect group means for the blue tit dataset.](SM3_ExplainingDeviation_files/figure-html/fig-effects-plots-BT-1.png){#fig-effects-plots-BT width=672}\n:::\n:::\n\n::: {#tbl-marginal-means-weights-analysis .cell .column-page tbl-cap='Marginal means estimate across weight and random effects specifications for all estimable models for both *Eucalyptus* and blue tit datasets.'}\n\n```{.r 
.cell-code}\nmodel_means_results %>% \n select(dataset, model_spec, results) %>% \n unnest(results) %>% \n separate(model_spec, into = c(\"model_spec_weight\", \n \"model_spec_random_effect\"), sep = \"\\\\.\") %>%\n group_by(dataset) %>% \n gt::gt(rowname_col = \"model_spec_weight\") %>% #\n gt::opt_stylize(style = 6, color = \"gray\") %>%\n gt::tab_stubhead(label = \"Model Weight\") %>%\n gt::text_transform(\n locations = cells_stub(\n rows = str_detect(PublishableAsIs, \"deeply\", negate = TRUE)\n ),\n fn = function(x){\n paste0(\"\")\n }\n ) %>% \n gt::text_transform(\n locations = cells_body(columns = \"model_spec_random_effect\"),\n fn = function(x){\n str_replace(x, \"RE_rev\", \"Reviewer ID\") %>% \n str_replace(\"RE_study\", \"Study ID\") %>% \n str_replace(\"RE_both\", \"Reviewer ID and Study ID\")\n }\n ) %>% \n gt::text_transform(\n locations = cells_stub(),\n fn = function(x){\n str_replace(x, \"no_weights\", \"None\") %>% \n str_replace(\"inv_bc_var\", \"Inverse Box-Cox variance\") %>% \n str_replace(\"inv_folded_v\", \"Inverse folded variance\")\n }\n ) %>% \n gt::cols_label(model_spec_random_effect = \"Random Effects\",\n PublishableAsIs = \"Peer Rating\",\n CI_low = gt::md(\"95\\\\%CI\"),\n SE = gt::md(\"$\\\\text{SE}$\")) %>% \n gt::cols_merge(columns = starts_with(\"CI_\"), \n pattern = \"[{1},{2}]\") %>% \n gt::fmt_number(columns = c(\"Mean\", \"SE\", \"CI_low\", \"CI_high\"),\n decimals = 2) %>% \n gt::text_transform(\n locations = cells_body(\n columns = \"model_spec_random_effect\",\n rows = PublishableAsIs != \"deeply flawed and unpublishable\"\n ),\n fn = function(x){\n paste0(\"\")\n }\n ) %>% \n gt::tab_style(\n style = list(gt::cell_text(transform = \"capitalize\"), \n gt::cell_text(style = \"italic\")),\n locations = gt::cells_row_groups(groups = \"eucalyptus\")\n ) %>% \n gt::tab_style(style = cell_text(transform = \"capitalize\"), \n locations = cells_body(columns = \"PublishableAsIs\"))\n```\n\n::: 
{.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n\n\n\n\n\n \n \n \n
Model WeightRandom EffectsPeer RatingMean

\\(\\text{SE}\\)

\n

95%CI

\n
blue tit
NoneReviewer IDdeeply flawed and unpublishable−1.110.11[−1.33,−0.89]
publishable with major revision−1.300.05[−1.39,−1.21]
publishable with minor revision−1.300.04[−1.38,−1.22]
publishable as is−1.240.07[−1.38,−1.10]
Inverse folded varianceReviewer IDdeeply flawed and unpublishable−1.400.14[−1.67,−1.12]
publishable with major revision−1.480.06[−1.61,−1.35]
publishable with minor revision−1.430.05[−1.53,−1.32]
publishable as is−1.180.08[−1.35,−1.01]
Inverse Box-Cox varianceReviewer IDdeeply flawed and unpublishable−0.950.12[−1.18,−0.72]
publishable with major revision−1.150.06[−1.26,−1.03]
publishable with minor revision−1.050.05[−1.15,−0.95]
publishable as is−0.720.07[−0.85,−0.58]
eucalyptus
NoneReviewer IDdeeply flawed and unpublishable−2.660.27[−3.19,−2.12]
publishable with major revision−2.370.12[−2.61,−2.12]
publishable with minor revision−2.650.11[−2.86,−2.43]
publishable as is−2.600.17[−2.94,−2.27]
Inverse folded varianceReviewer IDdeeply flawed and unpublishable−3.310.26[−3.82,−2.81]
publishable with major revision−2.970.13[−3.21,−2.72]
publishable with minor revision−3.020.11[−3.23,−2.80]
publishable as is−3.080.16[−3.40,−2.77]
Inverse Box-Cox varianceReviewer IDdeeply flawed and unpublishable−0.510.39[−1.27,0.24]
publishable with major revision−1.000.30[−1.59,−0.42]
publishable with minor revision−2.220.30[−2.81,−1.64]
publishable as is−2.760.34[−3.42,−2.09]
Inverse Box-Cox varianceStudy IDdeeply flawed and unpublishable−2.580.06[−2.70,−2.45]
publishable with major revision−2.580.06[−2.70,−2.45]
publishable with minor revision−2.580.06[−2.70,−2.45]
publishable as is−2.580.06[−2.70,−2.45]
\n
\n```\n\n:::\n:::\n\n\n\n\nAfter discarding models based on the above criteria, we were left with a subset of models that passed convergence and singularity checks, and had estimable parameter means, all of which included reviewer ID as the only random effect. Although it seemed that the inverse Box-Cox variance resulted skewed estimated marginal means, it was unclear whether the inverse folded variance was a better alternative over using no weights, as the inverse folded variance seemed to exhibit a similar pattern in pulling the estimated marginal effects towards zero, but to a lesser extent than for the inverse Box-Cox variance. \n\nTo further aid in decision-making about which model weights to use, we compared the performance of all models that passed the convergence and singularity checks, and had estimable parameter means. We used the `performance::compare_performance` function to calculate performance metrics for this subset of models, and plotted the results using the `performance` package's in-built plotting method, which creates spider plots of normalised performance metrics for each model [@fig-model-comparison-weights-analysis]. For completeness, all model performance results of this final subset are reported in [@tbl-model-perf-metrics-weights-analysis].\n\nThe performance comparison plots confirmed our suspicions that the inverse Box-Cox variance was not a suitable weight for these models, and it performed relatively poorly across all metrics for both the *Eucalyptus* and blue tit datasets [@fig-model-comparison-weights-analysis]. The inverse folded variance weighted models performed similarly poorly for both datasets across all metrics except for RMSE in the case of the blue tit dataset, and both Sigma and RMSE for the *Eucalyptus* dataset. For both datasets using no weights when there is only a random-effect for Reviewer ID resulted in the best model fit and performance across all metrics. 
In keeping with our informed preference of using no weights, model comparison analysis highlighted that using no weights in the models was preferable.\n\n\n\n\n\n::: {#fig-model-comparison-weights-analysis .cell}\n\n```{.r .cell-code}\n# plot performance for remaining models\nmodel_comparison_plots <- \n model_comparison_results %>% \n mutate(dataset = str_replace(dataset, \n \"eucalyptus\", \n \"*Eucalyptus*\")) %>% \n pull(results, \"dataset\") %>% \n map(plot)\n# for printing plot name on figure\n# model_comparison_plots %>% \n# map2(.x = ., .y = names(.), ~ .x + ggtitle(.y) #+ \n# theme(title = ggtext::element_markdown())\n# )\nmodel_comparison_plots\n```\n\n::: {.cell-output-display}\n![Blue tit models.](SM3_ExplainingDeviation_files/figure-html/fig-model-comparison-weights-analysis-1.png){#fig-model-comparison-weights-analysis-1 width=672}\n:::\n\n::: {.cell-output-display}\n![*Eucalyptus* models.](SM3_ExplainingDeviation_files/figure-html/fig-model-comparison-weights-analysis-2.png){#fig-model-comparison-weights-analysis-2 width=672}\n:::\n\nModel performance comparison plots for a subset of models that passed convergence and singularity checks and had estimable marginal effects. Models are compared based on their performance metrics, including $R^{2}_\\text{Conditional}$, $R^{2}_\\text{Marginal}$, Intra-Class Correlation, Root Mean Squared Error (RMSE), and the weighted AIC, corrected AIC and BIC. Values of performance are normalised across models for each metric, against the top-most performing model across all metrics. Greater distance from the centre on each metric axis indicates dominance in performance. 
For both blue tit and *Eucalyptus* models, all included a random effect for Reviewer ID, and weights consisted of the inverse Box-Cox variance for weights (blue line), inverse folded variance (yellow line), or none (red line).\n:::\n\n::: {#tbl-model-perf-metrics-weights-analysis .cell .column-page tbl-cap='Model performance metric values (non-normalised) for final subset of models considered in weights analysis. All models in final subset included random-effect of Reviewer ID. Metrics included $R^{2}_\\text{Conditional}$, $R^{2}_\\text{Marginal}$, Intra-Class Correlation, Root Mean Squared Error (RMSE), and the weighted AIC, corrected AIC and BIC.'}\n\n```{.r .cell-code}\nall_models %>% \n filter(model_spec != \"inv_bc_var.RE_study\" | dataset != \"eucalyptus\") %>% #rm nearly unidentifiable model\n semi_join(estimate_means, \n by = join_by(dataset, model_spec)) %>% \n group_by(dataset) %>% \n summarise(model = set_names(model, model_spec) %>% list, \n results = map(model, \n performance::compare_performance, \n rank = F), \n results = set_names(results, \n unique(dataset)), \n .groups = \"keep\") %>% \n unnest(results) %>%\n select(-Model, -model) %>% \n gt::gt() %>% \n gt::cols_label(Name = \"Model Weight\",\n R2_conditional = gt::md(\"$$R^{2}_\\\\text{Conditional}$$\"),\n R2_marginal = gt::md(\"$$R^{2}_\\\\text{Marginal}$$\"),\n Sigma = gt::md(\"$$\\\\sigma$$\"),\n AICc = gt::md(\"$$AIC_c$$\"),\n AICc_wt = gt::md(\"$$AIC_c$$ (wt)\"),\n BIC_wt = gt::md(\"$$BIC$$ (wt)\"),\n AIC_wt = gt::md(\"$$AIC$$ (wt)\"),\n AIC = gt::md(\"$$AIC$$\"),\n BIC = gt::md(\"$$BIC$$\")) %>%\n gt::fmt_number(columns = contains(c(\"AIC\",\"AICc\", \"BIC\", \"R2_\", \"ICC\", \"Sigma\")), \n drop_trailing_zeros = TRUE, \n drop_trailing_dec_mark = TRUE,\n decimals = 2) %>% \n gt::fmt_scientific(columns = \"R2_conditional\",\n rows = R2_conditional < 0.01 | R2_conditional > 1000,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_scientific(columns = \"R2_marginal\",\n rows = 
R2_marginal < 0.01 | R2_marginal > 1000,\n drop_trailing_zeros = T,\n decimals = 2) %>%\n gt::fmt_scientific(columns = \"RMSE\",\n rows = RMSE < 0.01 | RMSE > 1000,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_scientific(columns = c(\"ICC\"),\n rows = ICC < 0.01 | ICC > 1000,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_scientific(columns = c(\"Sigma\"),\n rows = Sigma < 0.01 | Sigma > 1000,\n drop_trailing_zeros = T,\n decimals = 2) %>% \n gt::fmt_scientific(columns = gt::contains(c(\"_wt\")), \n rows = Name != \"no_weights.RE_rev\",\n drop_trailing_zeros = F,\n decimals = 2) %>%\n gt::opt_stylize(style = 6, color = \"gray\") %>%\n gt::tab_style(\n style = list(gt::cell_text(transform = \"capitalize\"), \n gt::cell_text(style = \"italic\")),\n locations = gt::cells_row_groups(groups = \"eucalyptus\")\n ) %>% \n gt::fmt(\n columns = gt::contains(\"Name\"),\n fns = function(x) str_remove(x, \".RE_rev\") %>% \n str_replace(\"no_weights\", \"None\") %>%\n str_replace(\"inv_bc_var\", \"Inverse Box-Cox variance\") %>%\n str_replace(\"inv_folded_v\", \"Inverse folded variance\")\n ) \n```\n\n::: {.cell-output-display}\n\n```{=html}\n
\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n \n \n \n
Model Weight

$$AIC$$

\n

$$AIC$$ (wt)

\n

$$AIC_c$$

\n

$$AIC_c$$ (wt)

\n

$$BIC$$

\n

$$BIC$$ (wt)

\n

$$R^{2}_\\text{Conditional}$$

\n

$$R^{2}_\\text{Marginal}$$

\n
ICCRMSE

$$\\sigma$$

\n
blue tit
None723.151723.331748.110.097.47 × 10−30.080.48279810.5
Inverse Box-Cox variance840.523.26 × 10−26840.73.26 × 10−26865.483.26 × 10−267.67 × 10−41.31 × 10−46.36 × 10−40.582250511.83
Inverse folded variance1,026.751.19 × 10−661,026.931.19 × 10−661,051.71.19 × 10−664.71 × 10−45.14 × 10−54.2 × 10−40.502670413.19
eucalyptus
None1,063.7411,063.9911,086.8210.130.010.121.01734091.06
Inverse Box-Cox variance1,386.886.80 × 10−711,387.126.80 × 10−711,409.966.80 × 10−712.28 × 10−92.47 × 10−102.03 × 10−91.49938724.68 × 104
Inverse folded variance1,109.351.25 × 10−101,109.61.25 × 10−101,132.431.25 × 10−105.66 × 10−41.82 × 10−55.48 × 10−41.126658619.68
\n
\n```\n\n:::\n:::\n", + "supporting": [], + "filters": [ + "rmarkdown/pagebreak.lua" + ], + "includes": {}, + "engineDependencies": {}, + "preserve": {}, + "postProcess": true + } +} \ No newline at end of file diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-box-cox-transformations-1.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-box-cox-transformations-1.png new file mode 100644 index 0000000..c592feb Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-box-cox-transformations-1.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-box-cox-transformations-2.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-box-cox-transformations-2.png new file mode 100644 index 0000000..86e82cb Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-box-cox-transformations-2.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-BT-1.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-BT-1.png new file mode 100644 index 0000000..1c3ec48 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-BT-1.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-BT-2.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-BT-2.png new file mode 100644 index 0000000..0a8aebf Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-BT-2.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-BT-3.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-BT-3.png new file mode 100644 index 0000000..d550d30 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-BT-3.png differ diff --git 
a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-1.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-1.png new file mode 100644 index 0000000..ed885fc Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-1.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-2.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-2.png new file mode 100644 index 0000000..683941b Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-2.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-3.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-3.png new file mode 100644 index 0000000..9860fc1 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-3.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-4.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-4.png new file mode 100644 index 0000000..22db351 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-effects-plots-Euc-4.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-model-comparison-weights-analysis-1.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-model-comparison-weights-analysis-1.png new file mode 100644 index 0000000..757102d Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-model-comparison-weights-analysis-1.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-model-comparison-weights-analysis-2.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-model-comparison-weights-analysis-2.png new file mode 100644 index 0000000..b07867e Binary files 
/dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-model-comparison-weights-analysis-2.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-1.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-1.png new file mode 100644 index 0000000..5a970f8 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-1.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-2.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-2.png new file mode 100644 index 0000000..41f5dc4 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-2.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-3.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-3.png new file mode 100644 index 0000000..d440ddf Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-3.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-4.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-4.png new file mode 100644 index 0000000..70498fb Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-4.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-5.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-5.png new file mode 100644 index 0000000..e3ca834 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cat-rating-5.png differ diff --git 
a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-1.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-1.png new file mode 100644 index 0000000..b89e45b Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-1.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-2.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-2.png new file mode 100644 index 0000000..92b6c08 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-2.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-3.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-3.png new file mode 100644 index 0000000..c695069 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-3.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-4.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-4.png new file mode 100644 index 0000000..a754090 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-4.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-5.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-5.png new file mode 100644 index 0000000..c695069 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-5.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-6.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-6.png new file mode 100644 
index 0000000..a754090 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-deviation-cont-rating-6.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-euc-deviation-RE-plots-1.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-euc-deviation-RE-plots-1.png new file mode 100644 index 0000000..ce3948d Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-euc-deviation-RE-plots-1.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-euc-deviation-RE-plots-2.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-euc-deviation-RE-plots-2.png new file mode 100644 index 0000000..09696eb Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-euc-deviation-RE-plots-2.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-euc-deviation-RE-plots-3.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-euc-deviation-RE-plots-3.png new file mode 100644 index 0000000..871bf74 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-euc-deviation-RE-plots-3.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-1.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-1.png new file mode 100644 index 0000000..e677fac Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-1.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-2.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-2.png new file mode 100644 index 0000000..1485fee Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-2.png differ diff --git 
a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-3.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-3.png new file mode 100644 index 0000000..aecb865 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-3.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-4.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-4.png new file mode 100644 index 0000000..0311291 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-4.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-5.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-5.png new file mode 100644 index 0000000..d8e74ea Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-5.png differ diff --git a/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-6.png b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-6.png new file mode 100644 index 0000000..f33cbc8 Binary files /dev/null and b/_freeze/supp_mat/SM3_ExplainingDeviation/figure-html/fig-yi-sorensen-plots-6.png differ diff --git a/_freeze/supp_mat/SM4_case_study_datasets/execute-results/html.json b/_freeze/supp_mat/SM4_case_study_datasets/execute-results/html.json new file mode 100644 index 0000000..01f2af2 --- /dev/null +++ b/_freeze/supp_mat/SM4_case_study_datasets/execute-results/html.json @@ -0,0 +1,15 @@ +{ + "hash": "121a3a0d7675a309e79a915daec74495", + "result": { + "engine": "knitr", + "markdown": "---\ntitle: \"Correlation Matrices of Case Study Data\"\nformat: \n html:\n code-fold: true\n echo: true\neditor: visual\nnumber-sections: true\nexecute:\n freeze: auto # re-render only when source changes\ntoc: false\npre-render: \"utils.R\"\nbibliography: \n - 
../ms/references.bib\n - ../ms/grateful-refs.bib\n---\n\n::: {.cell}\n\n```{.r .cell-code}\nlibrary(tidyverse)\nlibrary(ManyEcoEvo)\nlibrary(GGally)\nset.seed(1234)\nsource(here::here(\"utils.R\"))\n```\n:::\n\n\n\n\nPairwise-correlation plots for the *Eucalyptus* and blue tit case-study data provided to analysts are shown in @fig-ggpairs-eucalyptus and @fig-ggpairs-bt, respectively. Plots were created with R package `GGally` [@GGally].\n\n\n\n\n::: {.cell .fig-column-screen}\n\n```{.r .cell-code}\neuc_data %>%\n select(where(is_double),\n -Date, \n -`Quadrat no`,\n -Easting, \n -Northing,\n -`small*0.25+medium*1.25+large*2.5`,\n -`average.proportion.of.plots.containing.at.least.one.euc.seedling.of.any.size`) %>%\n GGally::ggpairs()\n```\n\n::: {.cell-output-display}\n![Pairwise correllation plot for all *Eucalyptus* dataset variables except for `Date`, `Quadrat no`, `Easting`, `Northing`.](SM4_case_study_datasets_files/figure-html/fig-ggpairs-eucalyptus-1.png){#fig-ggpairs-eucalyptus width=1920}\n:::\n:::\n\n::: {.cell .fig-column-screen}\n\n```{.r .cell-code}\nblue_tit_data %>%\n naniar::replace_with_na_all(condition = ~ .x == \".\") %>% \n mutate(across(c(contains(\"_ring\"), \n rear_nest_trt, \n hatch_year, \n hatch_nest_breed_ID,\n hatch_Area,\n hatch_Box,\n day14_measurer,\n contains(\"hatch_Box\"),\n starts_with(\"rear_\"),\n starts_with(\"hatch_nest\"),\n home_or_away,\n -rear_d0_rear_nest_brood_size,\n contains(\"manipulation\"),\n chick_sex_molec,\n Date_of_day14,\n `Extra-pair_paternity`,\n -rear_Cs_in,\n -rear_Cs_out,\n chick_survival_to_first_breed_season,\n -rear_Cs_at_start_of_rearing), \n as.factor),\n across(where(is.character), as.numeric),\n across(c(rear_Cs_out,\n rear_Cs_in,\n rear_Cs_at_start_of_rearing),\n as.integer)) %>% \n select(where(is.numeric), -`day 14 weight`) %>% \n GGally::ggpairs() \n```\n\n::: {.cell-output-display}\n![Pairwise correlation plot of all numeric variables in blue tit case study 
dataset](SM4_case_study_datasets_files/figure-html/fig-ggpairs-bt-1.png){#fig-ggpairs-bt width=1440}\n:::\n:::\n", + "supporting": [], + "filters": [ + "rmarkdown/pagebreak.lua" + ], + "includes": {}, + "engineDependencies": {}, + "preserve": {}, + "postProcess": true + } +} \ No newline at end of file diff --git a/_freeze/supp_mat/SM4_case_study_datasets/figure-html/fig-ggpairs-bt-1.png b/_freeze/supp_mat/SM4_case_study_datasets/figure-html/fig-ggpairs-bt-1.png new file mode 100644 index 0000000..5dced40 Binary files /dev/null and b/_freeze/supp_mat/SM4_case_study_datasets/figure-html/fig-ggpairs-bt-1.png differ diff --git a/_freeze/supp_mat/SM4_case_study_datasets/figure-html/fig-ggpairs-eucalyptus-1.png b/_freeze/supp_mat/SM4_case_study_datasets/figure-html/fig-ggpairs-eucalyptus-1.png new file mode 100644 index 0000000..28584b3 Binary files /dev/null and b/_freeze/supp_mat/SM4_case_study_datasets/figure-html/fig-ggpairs-eucalyptus-1.png differ diff --git a/_freeze/supp_mat/SM4_case_study_datasets/figure-html/ggpairs-eucalyptus-1.png b/_freeze/supp_mat/SM4_case_study_datasets/figure-html/ggpairs-eucalyptus-1.png new file mode 100644 index 0000000..594d49d Binary files /dev/null and b/_freeze/supp_mat/SM4_case_study_datasets/figure-html/ggpairs-eucalyptus-1.png differ diff --git a/_quarto.yml b/_quarto.yml index 8b654da..f8a12b8 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -1,11 +1,13 @@ project: type: book # lib-dir: "renv/library/R-4.4/aarch64-apple-darwin20/" + pre-render: "utils.R" render: - "!internal/" - "!outputs/" - "!vignettes/" - "supp_mat/" + execute-dir: project book: chapters: @@ -55,7 +57,8 @@ format: } }; - + code-link: true +css: "stylefile.css" # toc-location: left # number-depth: 1 @@ -66,8 +69,9 @@ format: # (other formats) # pdf: default -execute: - freeze: true +# execute: +# freeze: false +# cache: false diff --git a/index.qmd b/index.qmd index 400e9c9..f24e2a5 100644 --- a/index.qmd +++ b/index.qmd @@ -1884,6 +1884,9 @@ google-scholar: 
true editor: markdown: wrap: sentence +pre-render: utils.R +execute: + freeze: auto --- ```{r load-libs, include=FALSE,eval=TRUE,message=FALSE, echo=FALSE} @@ -1904,10 +1907,8 @@ library(broom.mixed) library(tidymodels) #todo consider porting the extract_fit_engine into the ManyEcoEvo package library(grateful) library(modelbased) +source(here::here("utils.R")) set.seed(1234) - -# Helper Funs -round_pluck <- function(data, x){pluck(data, x, \(y) round(y, 2))} ``` # Introduction @@ -2009,7 +2010,9 @@ We have mass and tarsus length data from 3720 individual chicks divided among 16 The full list of variables included in the dataset is publicly available (), along with the data (). ::: {.callout-note appearance="simple"} -**Additional Explanation:** Shortly after beginning to recruit analysts, several analysts noted a small set of related errors in the blue tit dataset. +## Additional Explanation: + +Shortly after beginning to recruit analysts, several analysts noted a small set of related errors in the blue tit dataset. We corrected the errors, replaced the dataset on our OSF site, and emailed the analysts on 19 April 2020 to instruct them to use the revised data. The email to analysts is available here (). The errors are explained in that email. @@ -2050,7 +2053,7 @@ All analysts and reviewers were offered the opportunity to share co-authorship o All analysts signed \[digitally\] a consent (ethics) document () approved by the Whitman College Institutional Review Board prior to being allowed to participate. ::: {.callout-note appearance="simple"} -**Preregistration Deviation:** +## Preregistration Deviation: Due to the large number of recruited analysts and reviewers and the anticipated challenges of receiving and integrating feedback from so many authors, we limited analyst and reviewer participation in the production of the final manuscript to an invitation to call attention to serious problems with the manuscript draft. 
::: @@ -2058,10 +2061,10 @@ Due to the large number of recruited analysts and reviewers and the anticipated We identified our minimum number of analysts per dataset by considering the number of effects needed in a meta-analysis to generate an estimate of heterogeneity ($\tau^{2}$) with a 95$\%$confidence interval that does not encompass zero. This minimum sample size is invariant regardless of $\tau^{2}$. This is because the same t-statistic value will be obtained by the same sample size regardless of variance ($\tau^{2}$). -We see this by first examining the formula for the standard error, SE for variance, ($\tau^{2}$) or SE($\tau^{2}$) assuming normality in an underlying distribution of effect sizes [@knight2000]: +We see this by first examining the formula for the standard error, $\text{SE}$ for variance, ($\tau^{2}$) or ($\text{SE}\tau^{2}$) assuming normality in an underlying distribution of effect sizes [@knight2000]: $$ -SE({{\tau}^2})=\sqrt{\frac{{2\tau}^4}{n-1}} +\text{SE}({{\tau}^2})=\sqrt{\frac{{2\tau}^4}{n-1}} $$ {#eq-SE-tau} and then rearranging the above formula to show how the t-statistic is independent of $\tau^2$, as seen below. @@ -2080,13 +2083,13 @@ For the evolutionary ecology dataset that question was "To what extent is the gr They also were asked to upload their analysis files (including the dataset as they formatted it for analysis and their analysis code \[if applicable\]) and a detailed journal-ready statistical methods section. ::: {.callout-note appearance="simple"} -**Additional Explanation:** +## Additional Explanation: As is common in many studies in ecology and evolutionary biology, the datasets we provided contained many variables, and the research questions we provided could be addressed by our datasets in many different ways. 
For instance, volunteer analysts had to choose the dependent (response) variable and the independent variable, and make numerous other decisions about which variables and data to use and how to structure their model. ::: ::: {.callout-note appearance="simple"} -**Preregistration Deviation:** +## Preregistration Deviation: We originally planned to have analysts complete a single survey (), but after we evaluated the results of that survey, we realized we would need a second survey () to adequately collect the information we needed to evaluate heterogeneity of results (step 5). We provided a set of detailed instructions with the follow-up survey, and these instructions are publicly available and can be found within the following files (blue tit: , *Eucalyptus*: ). @@ -2122,7 +2125,7 @@ After providing the final review, the reviewer was simultaneously provided with The invitation to revise the original ratings was as follows: "If, now that you have seen all the analyses you are reviewing, you wish to revise your ratings of any of these analyses, you may do so now." The text box was prefaced with this prompt: "Please explain your choice to revise (or not to revise) your ratings." ::: {.callout-note appearance="simple"} -**Additional explanation:** Unregistered analysis. +## Additional explanation: Unregistered analysis. To determine how consistent peer reviewers were in their ratings, we assessed inter-rater reliability among reviewers for both the categorical and quantitative ratings combining blue tit and *Eucalyptus* data using Krippendorff's alpha for ordinal and continuous data respectively. This provides a value that is between -1 (total disagreement between reviewers) and 1 (total agreement between reviewers). @@ -2132,30 +2135,30 @@ This provides a value that is between -1 (total disagreement between reviewers) The lead team conducted the analyses outlined in this section. We described the variation in model specification in several ways. 
-We calculated summary statistics describing variation among analyses, including mean, SD, and range of number of variables per model included as fixed effects, the number of interaction terms, the number of random effects, and the mean, SD, and range of sample sizes. +We calculated summary statistics describing variation among analyses, including mean, $\text{SD}$, and range of number of variables per model included as fixed effects, the number of interaction terms, the number of random effects, and the mean, $\text{SD}$, and range of sample sizes. We also present the number of analyses in which each variable was included. We summarized the variability in standardized effect sizes and predicted values of dependent variables among the individual analyses using standard random effects meta-analytic techniques. First, we derived standardized effect sizes from each individual analysis. -We did this for all linear models or generalized linear models by converting the $t$ value and the degree of freedom ($df$) associated with regression coefficients (e.g. the effect of the number of siblings \[predictor\] on growth \[response\] or the effect of grass cover \[predictor\] on seedling recruitment \[response\]) to the correlation coefficient, $r$, using the following: +We did this for all linear models or generalized linear models by converting the $t$ value and the degree of freedom ($\mathit{df}$) associated with regression coefficients (e.g. the effect of the number of siblings \[predictor\] on growth \[response\] or the effect of grass cover \[predictor\] on seedling recruitment \[response\]) to the correlation coefficient, $r$, using the following: $$ -r=\sqrt{\frac{{t}^2}{\left({{t}^2}+df\right) }} +r=\sqrt{\frac{{t}^2}{\left({{t}^2}+\mathit{df}\right) }} $$ {#eq-t-to-r} -This formula can only be applied if $t$ and $df$ values originate from linear or generalized linear models [GLMs; @nakagawa2007]. 
-If, instead, linear mixed-effects models (LMMs) or generalized linear mixed-effects models (GLMMs) were used by a given analysis, the exact $df$ cannot be estimated. -However, adjusted $df$ can be estimated, for example, using the Satterthwaite approximation of $df$, ${df}_S$, [note that SAS uses this approximation to obtain $df$ for LMMs and GLMMs; @luke2017]. -For analyses using either LMMs or GLMMs that do not produce ${df}_S$ we planned to obtain ${df}_S$ by rerunning the same (G)LMMs using the `lmer()` or `glmer()` function in the *lmerTest* package in R [@kuznetsova2017; @base]. +This formula can only be applied if $t$ and $\mathit{df}$ values originate from linear or generalized linear models [GLMs; @nakagawa2007]. +If, instead, linear mixed-effects models (LMMs) or generalized linear mixed-effects models (GLMMs) were used by a given analysis, the exact $\mathit{df}$ cannot be estimated. +However, adjusted $\mathit{df}$ can be estimated, for example, using the Satterthwaite approximation of $\mathit{df}$, $\mathit{df}_S$, [note that SAS uses this approximation to obtain $\mathit{df}$ for LMMs and GLMMs; @luke2017]. +For analyses using either LMMs or GLMMs that do not produce $\mathit{df}_S$ we planned to obtain $\mathit{df}_S$ by rerunning the same (G)LMMs using the `lmer()` or `glmer()` function in the *lmerTest* package in R [@kuznetsova2017; @base]. ::: {.callout-note appearance="simple"} -**Preregistration Deviation:** +## Preregistration Deviation: Rather than re-run these analyses ourselves, we sent a follow-up survey (referenced above under "Primary data analyses") to analysts and asked them to follow our instructions for producing this information. The instructions are publicly available and can be found within the following files (blue tit: , *Eucalyptus*: ). ::: -We then used the $t$ values and $df_S$ from the models to obtain $r$ as per the formula above. 
-All $r$ and accompanying $df$ (or $df_S$) were converted to Fisher's $Z_r$ +We then used the $t$ values and $\mathit{df}_S$ from the models to obtain $r$ as per the formula above. +All $r$ and accompanying $\mathit{df}$ (or $\mathit{df}_S$) were converted to Fisher's $Z_r$ $$ Z_r = \frac{1}{2} \ln(\dfrac{1+r}{1-r}) @@ -2172,7 +2175,9 @@ To accomplish this, we derived a point estimate (out-of-sample predicted value) We originally described these three values as associated with the 25th percentile, median, and 75th percentile of the independent variable and any covariates. ::: {.callout-note appearance="simple"} -**Preregistration Deviation:** The original description of the out-of-sample specifications did not account for the facts that (a) some variables are not distributed in a way that allowed division in percentiles and that (b) variables could be either positively or negatively correlated with the dependent variable. +## Preregistration Deviation: + +The original description of the out-of-sample specifications did not account for the facts that (a) some variables are not distributed in a way that allowed division in percentiles and that (b) variables could be either positively or negatively correlated with the dependent variable. We provide a more thorough description here: We derived three point-estimates (out-of-sample predicted values) for the dependent variable of interest; one for each of three values of our primary independent variable that we specified. We also specified values for all other variables that could have been included as independent variables in analysts' models so that we could derive the predicted values from a fully specified version of any model produced by analysts. For all potential independent variables, we selected three values or categories. 
@@ -2185,14 +2190,23 @@ However, for some continuous and categorical predictors, we also made selections We used the 25th and 75th percentiles rather than minimum and maximum values to reduce the chance of occupying unrealistic parameter space. We planned to derive these predicted values from the model information provided by the individual analysts. -All values (predictions) were first transformed to the original scale along with their standard errors (SE); we used the delta method [@verhoef2012] for the transformation of SE. -We used the square of the SE associated with predicted values as the sampling variance in the meta-analyses described below, and we planned to analyze these predicted values in exactly the same ways as we analyzed $Z_r$ in the following analyses. +All values (predictions) were first transformed to the original scale along with their standard errors ($\text{SE}$); we used the delta method [@verhoef2012] for the transformation of $\text{SE}$. +We used the square of the $\text{SE}$ associated with predicted values as the sampling variance in the meta-analyses described below, and we planned to analyze these predicted values in exactly the same ways as we analyzed $Z_r$ in the following analyses. ::: {.callout-note appearance="simple"} -**Preregistration Deviation:** Because analysts of blue tit data chose different dependent variables on different scales, after transforming out-of-sample values to the original scales, we standardized all values as z scores ('standard scores') to put all dependent variables on the same scale and make them comparable. -This involved taking each relevant value on the original scale (whether a predicted point estimate or a SE associated with that estimate) and subtracting the value in question from the mean value of that dependent variable derived from the full dataset and then dividing this difference by the standard deviation, SD, corresponding to the mean from the full dataset. 
-Thus, all our out-of-sample prediction values from the blue tit data are from a distribution with the mean of 0 and SD of 1. -We did not add this step for the *Eucalyptus* data because (a) all responses were on the same scale (counts of *Eucalyptus* stems) and were thus comparable and (b) these data, with many zeros and high skew, are poorly suited for z scores. +## Preregistration Deviation: + +**1. Standardizing blue tit out-of-sample predictions** $y_i$ + +Because analysts of blue tit data chose different dependent variables on different scales, after transforming out-of-sample values to the original scales, we standardized all values as z scores ('standard scores') to put all dependent variables on the same scale and make them comparable. This involved taking each relevant value on the original scale (whether a predicted point estimate or a $\text{SE}$ associated with that estimate) and subtracting the value in question from the mean value of that dependent variable derived from the full dataset and then dividing this difference by the standard deviation, $\text{SD}$, corresponding to the mean from the full dataset (@eq-Z-VZ). Thus, all our out-of-sample prediction values from the blue tit data are from a distribution with the mean of 0 and $\text{SD}$ of 1. + +Note that we were unable to standardise some analyst-constructed variables, so these analyses were excluded from the final out-of-sample estimates meta-analysis, see @sec-excluded-yi for details and explanation. + +**2. Log-transforming *Eucalyptus* out-of-sample predictions** $y_i$ + +All analyses of the *Eucalyptus* data chose dependent variables that were on the same scale, that is, *Eucalyptus* seedling counts. 
Although analysts may have used different size-classes of *Eucalyptus* seedlings for their dependent variable, we considered these choices to be akin to subsetting, rather than as different response variables, since changing the size-class of the dependent variable ultimately results in observations being omitted or included. Consequently, we did not standardise *Eucalyptus* out-of-sample predictions. + +We were unable to fit quasi-Poisson or Poisson meta-regressions, as desired [@ohara2010], because available meta-analysis packages (e.g. `metafor::` and `metainc::`) do not provide implementation for outcomes as estimates-only, methods are only provided for outcomes as ratios or rate-differences between two groups. Consequently, we log-transformed the out-of-sample predictions for the *Eucalyptus* data and use the mean estimate for each prediction scenario as the dependent variable in our meta-analysis with the associated $\text{SE}$ as the sampling variance in the meta-analysis [@nakagawa2023, Table 2]. ::: We plotted individual effect size estimates ($Z_r$) and predicted values of the dependent variable ($y_i$) and their corresponding 95$\%$confidence / credible intervals in forest plots to allow visualization of the range and precision of effect size and predicted values. @@ -2208,7 +2222,7 @@ $$ {#eq-MA_yi} where $y_i$ is the predicted value for the dependent variable at the 25th percentile, median, or 75th percentile of the independent variables. The individual $Z_r$ effect sizes were weighted with the inverse of sampling variance for $Z_r$. -The individual predicted values for dependent variable ($y_i$) were weighted by the inverse of the associated $SE^2$ (original registration omitted "inverse of the" in error). +The individual predicted values for dependent variable ($y_i$) were weighted by the inverse of the associated $\text{SE}^2$ (original registration omitted "inverse of the" in error). 
These analyses provided an average $Z_r$ score or an average $y_i$ with corresponding 95$\%$confidence interval and allowed us to estimate two heterogeneity indices, $\tau^2$ and $I^2$. The former, $\tau^2$, is the absolute measure of heterogeneity or the between-study variance (in our case, between-effect variance) whereas $I^2$ is a relative measure of heterogeneity. We obtained the estimate of relative heterogeneity ($I^2$) by dividing the between-effect variance by the sum of between-effect and within-effect variance (sampling error variance). @@ -2218,7 +2232,9 @@ Our goal here was to visualize and quantify the degree of variation among analys We did not test for statistical significance. ::: {.callout-note appearance="simple"} -**Additional explanation:** Our use of $I^{2}$ to quantify heterogeneity violates an important assumption, but this violation does not invalidate our use of $I^{2}$ as a metric of how much heterogeneity can derive from analytical decisions. +## Additional explanation: + +Our use of $I^{2}$ to quantify heterogeneity violates an important assumption, but this violation does not invalidate our use of $I^{2}$ as a metric of how much heterogeneity can derive from analytical decisions. In standard meta-analysis, the statistic $I^{2}$ quantifies the proportion of variance that is greater than we would expect if differences among estimates were due to sampling error alone [@rosenberg2013]. However, it is clear that this interpretation does not apply to our value of $I^{2}$ because $I^{2}$ assumes that each estimate is based on an independent sample (although these analyses can account for non-independence via hierarchical modelling), whereas all our effects were derived from largely or entirely overlapping subsets of the same dataset. Despite this, we believe that $I^{2}$ remains a useful statistic for our purposes. 
@@ -2237,10 +2253,10 @@ We used the Box-Cox transformation on the absolute values of deviation scores to We described variation in this dependent variable with both a series of univariate analyses and a multivariate analysis. All these analyses were general linear (mixed) models. These analyses were secondary to our estimation of variation in effect sizes described above. -We wished to quantify relationships among variables, but we had no a priori expectation of effect size and made no dichotomous decisions about statistical significance. +We wished to quantify relationships among variables, but we had no *a priori* expectation of effect size and made no dichotomous decisions about statistical significance. -::: {#box-weight-deviation .callout-note appearance="simple"} -**Additional Explanation:** +::: {#nte-box-weight-deviation .callout-note appearance="simple"} +## Additional Explanation: In our meta-analyses based on Box-Cox transformed deviation scores, we leave these deviation scores unweighted. This is consistent with our registration, which did not mention weighting these scores. However, the fact that we did not mention weighting the scores was actually an error: we had intended to weight them, as is standard in meta-analysis, using the inverse variance of the Box-Cox transformed deviation scores [@eq-folded-variance]. 
@@ -2257,12 +2273,14 @@ To account for the non-independence of multiple ratings of the same analysis, we To account for potential differences among reviewers in their scoring of analyses, we also planned to include reviewer identity as a random effect: $$ -\begin{align} -\text{DeviationScore}_j = \text{BoxCox}(abs(\text{DeviationFromMean}_{j})) \\ -{\text{DeviationScore}}_{ij} \sim Rating_{ij} + \text{ReviewerID}_{i} + {\text{AnalysisID}}_{j} \\ -{\text{ReviewerID}}_i \sim \mathcal{N}(0,\sigma_i^2) \\ -{\text{AnalysisID}}_j \sim \mathcal{N}(0,\sigma_j^2) -\end{align} +\begin{alignat*}{2} +{\mathrm{DeviationScore}_{j}} &=&& \mathrm{BoxCox}(|\mathrm{DeviationFromMean}_{j}|) \\ +{\mathrm{DeviationScore}}_{ij} & \sim &&\mathrm{Rating}_{ij} + \\ +& &&\mathrm{ReviewerID}_{i} + \\ +& && {\mathrm{AnalysisID}}_{j} \\ +{\mathrm{ReviewerID}}_i &\sim &&\mathcal{N}(0,\sigma_i^2) \\ +{\mathrm{AnalysisID}}_j &\sim &&\mathcal{N}(0,\sigma_j^2) \\ +\end{alignat*} $$ {#eq-deviation-rating} Where $\text{DeviationFromMean}_{j}$ is the deviation from the meta-analytic mean for the $j$th analysis, $\text{ReviewerID}_{i}$ is the random intercept assigned to each $i$ reviewer, and $\text{AnalysisID}_{j}$ is the random intercept assigned to each $j$ analysis, both of which are assumed to be normally distributed with a mean of 0 and a variance of $\sigma^{2}$. 
@@ -2276,11 +2294,11 @@ ManyEcoEvo_viz %>% exclusion_set == "complete", publishable_subset == "All") %>% pull(model) %>% - walk( equatiomatic::extract_eq, - wrap = TRUE, - swap_var_names = - c("box_cox_abs_deviation_score_estimate" = "Box Cox(|Deviation Score|)", - "study_id" = "Analysis Identity")) + purrr::walk( equatiomatic::extract_eq, + wrap = TRUE, + swap_var_names = + c("box_cox_abs_deviation_score_estimate" = "Box Cox(|Deviation Score|)", + "study_id" = "Analysis Identity")) ``` @@ -2290,18 +2308,18 @@ Both of these analyses (1: 1-100 ratings as the fixed effect, 2: categorical rat Each of the four responses ($Z_r$, $y_{25th}$, $y_{50th}$, $y_{75th}$) were to be compared once to the initial ratings provided by the peer reviewers, and again based on the revised ratings provided by the peer reviewers. ::: {.callout-note appearance="simple"} -**Preregistration Deviation:** +## Preregistration Deviation: 1. We planned to include random effects of both analysis identity and reviewer identity in these models comparing reviewer ratings with deviation scores. - However, after we received the analyses, we discovered that a subset of analyst teams had either conducted multiple analyses and/or identified multiple effects per analysis as answering the target question. - We therefore faced an even more complex potential set of random effects. - We decided that including team ID, analysis ID, and effect ID along with reviewer ID as random effects in the same model would almost certainly lead to model fit problems, and so we started with simpler models including just effect ID and reviewer ID. - However, even with this simpler structure, our dataset was sparse, with reviewers rating a small number of analyses, resulting in models with singular fit (@sec-convergence-singularity). - Removing one of the random effects was necessary for the models to converge. 
- The models that included the categorical quality rating converged when including reviewer ID, and the models that included the continuous quality rating converged when including effect ID. +However, after we received the analyses, we discovered that a subset of analyst teams had either conducted multiple analyses and/or identified multiple effects per analysis as answering the target question. +We therefore faced an even more complex potential set of random effects. +We decided that including team ID and effect ID along with reviewer ID as random effects in the same model would almost certainly lead to model fit problems, and so we started with simpler models including just effect ID and reviewer ID. +However, even with this simpler structure, our dataset was sparse, with reviewers rating a small number of analyses, resulting in models with singular fit (@sec-convergence-singularity). +Removing one of the random effects was necessary for the models to converge. +For both models of deviation from the meta-analytic mean explained by categorical or continuous reviewer ratings, we removed the random effect of effect ID, leaving reviewer ID as the only random effect. 2. We conducted analyses only with the final peer ratings after the opportunity for revision, not with the initial ratings. - This was because when we recorded the final ratings, they over-wrote the initial ratings, and so we did not have access to those initial values. +This was because when we recorded the final ratings, the initial ratings were over-written, therefore we did not have access to those initial values. ::: The next set of univariate analyses sought to explain deviations from the mean effects based on a measure of the distinctiveness of the set of variables included in each analysis. 
@@ -2310,18 +2328,18 @@ To generate an individual Sorensen's value for each analysis required calculatin We calculated the Sorensen's index values using the *betapart* package [@betapart] in R: $$ -\beta_{Sorensen} = \frac{b+c}{2a+b+c} +\beta_{\mathrm{Sorensen}} = \frac{b+c}{2a+b+c} $$ {#eq-sorensen} where $a$ is the number of variables common to both analyses, $b$ is the number of variables that occur in the first analysis but not in the second and $c$ is the number of variables that occur in the second analysis but not in the first. We then used the per-model average Sorensen's index value as an independent variable to predict the deviation score in a general linear model, and included no random effect since each analysis is included only once, in R [@base]: $$ -\text{DeviationScore}_{j} \sim \beta \text{Sorensen}_{j} +\mathrm{DeviationScore}_{j} \sim {\beta_{\mathrm{Sorensen}}}_{j} $$ {#eq-deviation} ::: {.callout-note appearance="simple"} -**Additional explanation:** +## Additional explanation: When we planned this analysis, we anticipated that analysts would identify a single primary effect from each model, so that each model would appear in the analysis only once. Our expectation was incorrect because some analysts identified \>1 effect per analysis, but we still chose to specify our model as registered and not use a random effect. 
@@ -2336,15 +2354,16 @@ Finally, we conducted a multivariate analysis with the five predictors described We had stated here in the text that we would use only the revised (final) peer ratings in this analysis, so the absence of the initial ratings is not a deviation from our plan: $$ -\begin{align} -{\text{DeviationScore}}_{j} \sim {\text{RatingContinuous}}_{ij}\space+ \\ -{\text{RatingCategorical}}_{ij} \space + \\ -{\beta\text{Sorensen}}_{j} \space + \\ -{\text{AnalysisID}}_{j} \space + \\ -{\text{ReviewerID}}_{i} \\ -{\text{ReviewerID}}_i \sim \mathcal{N}(0,\sigma_i^2) \\ -{\text{AnalysisID}}_j \sim \mathcal{N}(0,\sigma_j^2) -\end{align} +\begin{alignat*}{3} +{\mathrm{DeviationScore}_{j}} &=&& \mathrm{BoxCox}(|\mathrm{DeviationFromMean}_{j}|) \\ +{\mathrm{DeviationScore}}_{ij} &\sim && {\mathrm{RatingContinuous}}_{ij} + \\ +& && {\mathrm{RatingCategorical}}_{ij} + \\ +& && {\beta_\mathrm{Sorensen}}_{j} + \\ +& && {\mathrm{AnalysisID}}_{j} + \\ +& && {\mathrm{ReviewerID}}_{i} \\ +{\mathrm{ReviewerID}}_{i} &\sim &&\mathcal{N}(0,\sigma_i^2) \\ +{\mathrm{AnalysisID}}_{j} &\sim &&\mathcal{N}(0,\sigma_j^2) +\end{alignat*} $$ {#eq-deviation-multivar} We conducted all the analyses described above eight times; for each of the four responses ($Z_r$, $y_{25th}$, $y_{50th}$, $y_{75th}$) one time for each of the two datasets. @@ -2361,20 +2380,20 @@ For instance, this means that in the case of the data on blue tit chick growth, Also, as described above, any analysis that could not produce an effect that could be converted to a signed $Z_r$ was excluded from analyses of $Z_r$. ::: {.callout-note appearance="simple"} -**Preregistration Deviation:** +## Preregistration Deviation: Some analysts had difficulty implementing our instructions to derive the out-of-sample predictions, and in some cases (especially for the *Eucalyptus* data), they submitted predictions with implausibly extreme values. 
-We believed these values were incorrect and thus made the conservative decision to exclude out-of-sample predictions where the estimates were \> 3 standard deviations from the mean value from the full dataset. +We believed these values were incorrect and thus made the conservative decision to exclude out-of-sample predictions where the estimates were \> 3 standard deviations from the mean value from the full dataset provided to teams for analysis. ::: ::: {.callout-note appearance="simple"} -**Additional explanation:** We conducted several unregistered analyses. +## Additional explanation: We conducted several unregistered analyses. **1. Evaluating model fit.** -We evaluated all fitted models using the `performance()` function from the *performance* package [@performance] and the `glance()` function from the *broom.mixed* package [@broommixed]. +We evaluated all fitted models using the `performance::performance()` function from the *performance* package [@performance] and the `glance()` function from the *broom.mixed* package [@broommixed]. For all models, we calculated the square root of the residual variance (Sigma) and the root mean squared error (RMSE). -For GLMMs `performance()` calculates the marginal and conditional $R^2$ values as well as the contribution of random effects (ICC), based on Nakagawa et al. [-@nakagawa2017]. +For GLMMs `performance::performance()` calculates the marginal and conditional $R^2$ values as well as the contribution of random effects (ICC), based on Nakagawa et al. [-@nakagawa2017]. The conditional $R^2$ accounts for both the fixed and random effects, while the marginal $R^2$ considers only the variance of the fixed effects. The contribution of random effects is obtained by subtracting the marginal $R^2$ from the conditional $R^2$. 
@@ -2385,7 +2404,9 @@ To explore this question, we removed the highest two and lowest two values of $Z To help understand the possible role of the quality of analyses in driving the heterogeneity we observed among estimates of $Z_r$, we created forest plots and recalculated our heterogeneity estimates after removing all effects from analysis teams that had received at least one rating of "deeply flawed and unpublishable" and then again after removing all effects from analysis teams with at least one rating of either "deeply flawed and unpublishable" or "publishable with major revisions". We also used self-identified levels of statistical expertise to examine heterogeneity when we retained analyses only from analysis teams that contained at least one member who rated themselves as "highly proficient" or "expert" (rather than "novice" or "moderately proficient") in conducting statistical analyses in their research area in our intake survey. -Additionally, to assess potential impacts of highly collinear predictor variables on estimates of $Z_r$ in blue tit analyses, we created forest plots and recalculated our heterogeneity estimates after we removed analyses that contained the brood count after manipulation and the highly correlated (correlation of 0.89, @fig-ggpairs-bt) brood count at day 14. This removal included the one effect based on a model that contained both these variables and a third highly correlated variable, the estimate of number of chicks fledged (the only model that included the estimate of number of chicks fledged). We did not conduct a similar analysis for the *Eucalyptus* dataset because there were no variables highly collinear with or among the primary predictors (grass cover variables) in that dataset (@fig-ggpairs-eucalyptus). 
+ +Additionally, to assess potential impacts of highly collinear predictor variables on estimates of $Z_r$ in blue tit analyses, we created forest plots (@fig-forest-plot-Zr-collinear-rm-subset) and recalculated our heterogeneity estimates after we removed analyses that contained the brood count after manipulation and the highly correlated (correlation of 0.89, @fig-ggpairs-bt) brood count at day 14. This removal included the one effect based on a model that contained both these variables and a third highly correlated variable, the estimate of number of chicks fledged (the only model that included the estimate of number of chicks fledged). We did not conduct a similar analysis for the *Eucalyptus* dataset because there were no variables highly collinear with the primary predictors (grass cover variables) in that dataset (@fig-ggpairs-eucalyptus). + **3. Exploring possible impacts of lower quality estimates of degrees of freedom.** @@ -2395,7 +2416,7 @@ We therefore conducted a second set of (more conservative) meta-analyses that ex ::: ::: {.callout-note appearance="simple"} -**Additional explanation:** Best practices in many-analysts research. +## Additional explanation: Best practices in many-analysts research. After we initiated our project, a paper was published outlining best practices in many-analysts studies [@aczel2021]. Although we did not have access to this document when we implemented our project, our study complies with these practices nearly completely. @@ -2407,10 +2428,12 @@ The one exception is that although we requested analysis code from analysts, we We planned for analysts and initiating authors to discuss the limitations, results, and implications of the study and collaborate on writing the final manuscript for review as a stage-2 Registered Report. 
::: {.callout-note appearance="simple"} -**Preregistration Deviation:** As described above, due to the large number of recruited analysts and reviewers and the anticipated challenges of receiving and integrating feedback from so many authors, we limited analyst and reviewer participation in the production of the final manuscript to an invitation to call attention to serious problems with the manuscript draft. +## Preregistration Deviation: + +As described above, due to the large number of recruited analysts and reviewers and the anticipated challenges of receiving and integrating feedback from so many authors, we limited analyst and reviewer participation in the production of the final manuscript to an invitation to call attention to serious problems with the manuscript draft. ::: -We built an R package, `ManyEcoEvo` to conduct the analyses described in this study [@ManyEcoEvo], which can be downloaded from [https://github.com/egouldo/ManyEcoEvo/](https://github.com/egouldo/ManyEcoEvo) to reproduce our analyses or replicate the analyses described here using alternate datasets. Data cleaning and preparation of analysis-data, as well as the analysis, is conducted in R [@base] reproducibly using the `targets` package [@targets]. This data and analysis pipeline is stored in the `ManyEcoEvo` package repository and its outputs are made available to users of the package when the library is loaded. +We built an R package, `ManyEcoEvo::` to conduct the analyses described in this study [@ManyEcoEvo], which can be downloaded from [https://github.com/egouldo/ManyEcoEvo/](https://github.com/egouldo/ManyEcoEvo) to reproduce our analyses or replicate the analyses described here using alternate datasets. Data cleaning and preparation of analysis-data, as well as the analysis, is conducted in R [@base] reproducibly using the `targets` package [@targets]. 
This data and analysis pipeline is stored in the `ManyEcoEvo::` package repository and its outputs are made available to users of the package when the library is loaded. The full manuscript, including further analysis and presentation of results is written in Quarto [@AllaireQuarto2024]. The source code to reproduce the manuscript is hosted at [https://github.com/egouldo/ManyAnalysts/](https://github.com/egouldo/ManyAnalysts/), and the rendered version of the source code may be viewed at [https://egouldo.github.io/ManyAnalysts/](https://egouldo.github.io/ManyAnalysts/). All R packages and their versions used in the production of the manuscript are listed at @sec-sesion-info. @@ -2426,12 +2449,12 @@ filter_vars_main_analysis <- rlang::exprs(estimate_type == "Zr", collinearity_subset == "All") filter_vars_main_no_est <- rlang::exprs(exclusion_set == "complete", - publishable_subset == "All", - expertise_subset == "All", - collinearity_subset == "All") + publishable_subset == "All", + expertise_subset == "All", + collinearity_subset == "All") total_usable_analyses <- - ManyEcoEvo::ManyEcoEvo_study_summary %>% + ManyEcoEvo_study_summary %>% filter(subset_name != "all") %>% unnest(data) %>% select(dataset, TeamIdentifier, submission_id, analysis_id) %>% @@ -2441,7 +2464,7 @@ total_usable_analyses <- janitor::adorn_totals() total_usable_effects <- - ManyEcoEvo::ManyEcoEvo_study_summary %>% + ManyEcoEvo_study_summary %>% filter(subset_name != "all") %>% unnest(data) %>% select(dataset, TeamIdentifier, submission_id, analysis_id, split_id) %>% @@ -2451,25 +2474,25 @@ total_usable_effects <- janitor::adorn_totals() Table2 <- # model composition - ManyEcoEvo::ManyEcoEvo_study_summary %>% - filter(subset_name != "all") %>% - select(subset_name, model_term_summary) %>% - unnest(cols = model_term_summary) + ManyEcoEvo_study_summary %>% + filter(subset_name != "all") %>% + select(subset_name, model_term_summary) %>% + unnest(cols = model_term_summary) Table4 <- # Conclusions 
analysis - ManyEcoEvo::ManyEcoEvo_study_summary %>% + ManyEcoEvo_study_summary %>% filter(subset_name == "all") %>% pluck("conclusions_summary", 1) Table1 <- # Teams, analyses and model types - ManyEcoEvo::ManyEcoEvo_study_summary %>% + ManyEcoEvo_study_summary %>% filter(subset_name != "all") %>% select(subset_name, model_type_summary) %>% unnest(cols = model_type_summary) # Sorensen Analysis SorensenSummary <- - ManyEcoEvo::ManyEcoEvo_study_summary %>% + ManyEcoEvo_study_summary %>% filter(subset_name != "all") %>% select(subset_name, sorensen_summary) %>% unnest(cols = sorensen_summary) @@ -2477,7 +2500,7 @@ SorensenSummary <- ## ----- Review Summaries ----- ReviewData <- - ManyEcoEvo::ManyEcoEvo_results %>% + ManyEcoEvo_results %>% ungroup %>% filter(!!!filter_vars_main_analysis) %>% select(data, -dataset) %>% @@ -2499,25 +2522,25 @@ ArticleReviewSummary <- .groups = "drop_last") %>% group_by(dataset) %>% summarise( - mean=mean(reviews), - sd=sd(reviews), - min=min(reviews), - max=max(reviews), - sum=sum(reviews), + mean = mean(reviews), + sd = sd(reviews), + min = min(reviews), + max = max(reviews), + sum = sum(reviews), .groups = "drop") # how many reviewers reviewers <- ReviewData %>% n_distinct("ReviewerId") # How many reviews per reviewer -mean_reviews_by_reviewer <- round(mean(ReviewerSummary$articles), digits=2) +mean_reviews_by_reviewer <- round(mean(ReviewerSummary$articles), digits = 2) min_reviews_by_reviewer <- min(ReviewerSummary$articles) max_reviews_by_reviewer <- max(ReviewerSummary$articles) ``` ## Summary Statistics -In total, `r ManyEcoEvo::ManyEcoEvo_study_summary %>% filter(subset_name == "all") %>% pluck("n_teams") + 28` analyst teams, comprising 246 analysts, contributed `r total_usable_analyses %>% filter(dataset == "blue tit") %>% pluck("n") + total_usable_analyses %>% filter(dataset == "eucalyptus") %>% pluck("n")` usable analyses (compatible with our meta-analyses and provided with all information needed for inclusion) of the two 
datasets examined in this study which yielded `r filter(total_usable_effects, dataset == "blue tit") %>% pluck("n")+filter(total_usable_effects, dataset == "eucalyptus") %>% pluck("n")` effects. +In total, `r ManyEcoEvo_study_summary %>% filter(subset_name == "all") %>% pluck("n_teams") + 28` analyst teams, comprising 246 analysts, contributed `r total_usable_analyses %>% filter(dataset == "blue tit") %>% pluck("n") + total_usable_analyses %>% filter(dataset == "eucalyptus") %>% pluck("n")` usable analyses (compatible with our meta-analyses and provided with all information needed for inclusion) of the two datasets examined in this study which yielded `r filter(total_usable_effects, dataset == "blue tit") %>% pluck("n")+filter(total_usable_effects, dataset == "eucalyptus") %>% pluck("n")` effects. Analysts produced `r filter(total_usable_effects, dataset == "blue tit") %>% pluck("n")` distinct effects that met our criteria for inclusion in at least one of our meta-analyses for the blue tit dataset. Analysts produced `r filter(total_usable_effects, dataset == "eucalyptus") %>% pluck("n")` distinct effects meeting our criteria for inclusion for the *Eucalyptus* dataset. Excluded analyses and effects either did not answer our specified biological questions, were submitted with insufficient information for inclusion in our meta-analyses, or were incompatible with production of our effect size(s). @@ -2527,10 +2550,10 @@ For both datasets, most submitted analyses incorporated mixed effects. Submitted analyses of the blue tit dataset typically specified normal error and analyses of the *Eucalyptus* dataset typically specified a non-normal error distribution (@tbl-Table1). For both datasets, the composition of models varied substantially in regards to the number of fixed and random effects, interaction terms, and the number of data points used, and these patterns differed somewhat between the blue tit and *Eucalyptus* analyses (See @tbl-Table2). 
-Focussing on the models included in the $Z_r$ analyses (because this is the larger sample), blue tit models included a similar number of fixed effects on average (mean `r Table2 %>% filter(dataset == "blue tit", variable == "fixed", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "blue tit", variable == "fixed", subset_name == "effects") %>% round_pluck("sd")` SD, range: `r Table2 %>% filter(dataset == "blue tit", variable == "fixed", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "blue tit", variable == "fixed", subset_name == "effects") %>% pluck("max")`) as *Eucalyptus* models (mean `r Table2 %>% filter(dataset == "eucalyptus", variable == "fixed", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "eucalyptus", variable == "fixed", subset_name == "effects") %>% round_pluck("sd")` SD, range: `r Table2 %>% filter(dataset == "eucalyptus", variable == "fixed", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "eucalyptus", variable == "fixed", subset_name == "effects") %>% pluck("max")`), but the standard deviation in number of fixed effects was somewhat larger in the *Eucalyptus* models. 
-The average number of interaction terms was much larger for the blue tit models (mean `r Table2 %>% filter(dataset == "blue tit", variable == "interactions", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "blue tit", variable == "interactions", subset_name == "effects") %>% round_pluck("sd")` SD, range: `r Table2 %>% filter(dataset == "blue tit", variable == "interactions", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "blue tit", variable == "interactions", subset_name == "effects") %>% pluck("max")`) than for the *Eucalyptus* models (mean `r Table2 %>% filter(dataset == "eucalyptus", variable == "interactions", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "eucalyptus", variable == "interactions", subset_name == "effects") %>% round_pluck("sd")` SD, range: `r Table2 %>% filter(dataset == "blue tit", variable == "interactions", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "eucalyptus", variable == "interactions", subset_name == "effects") %>% pluck("max")`), but still under 0.5 for both, indicating that most models did not contain interaction terms. 
-Blue tit models also contained more random effects (mean `r Table2 %>% filter(dataset == "blue tit", variable == "random", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "blue tit", variable == "random", subset_name == "effects") %>% round_pluck("sd")` SD, range: `r Table2 %>% filter(dataset == "blue tit", variable == "random", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "blue tit", variable == "random", subset_name == "effects") %>% pluck("max")`) than *Eucalyptus* models (mean `r Table2 %>% filter(dataset == "eucalyptus", variable == "random", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "eucalyptus", variable == "random", subset_name == "effects") %>% round_pluck("sd")` SD, range: `r Table2 %>% filter(dataset == "eucalyptus", variable == "random", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "eucalyptus", variable == "random", subset_name == "effects") %>% pluck("max")`). 
-The maximum possible sample size in the blue tit dataset (3720 nestlings) was an order of magnitude larger than the maximum possible in the *Eucalyptus* dataset (`r Table2 %>% filter(dataset == "eucalyptus", variable == "samplesize", subset_name == "effects") %>% pluck("max")` plots), and the means and standard deviations of the sample size used to derive the effects eligible for our study were also an order of magnitude greater for the blue tit dataset (mean `r Table2 %>% filter(dataset == "blue tit", variable == "samplesize", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "blue tit", variable == "samplesize", subset_name == "effects") %>% round_pluck("sd")` SD, range: `r Table2 %>% filter(dataset == "blue tit", variable == "samplesize", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "blue tit", variable == "samplesize", subset_name == "effects") %>% pluck("min")`) relative to the *Eucalyptus* models (mean `r Table2 %>% filter(dataset == "eucalyptus", variable == "samplesize", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "eucalyptus", variable == "samplesize", subset_name == "effects") %>% round_pluck("sd")` SD, range: `r Table2 %>% filter(dataset == "eucalyptus", variable == "samplesize", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "eucalyptus", variable == "samplesize", subset_name == "effects") %>% pluck("max")`). 
+Focussing on the models included in the $Z_r$ analyses (because this is the larger sample), blue tit models included a similar number of fixed effects on average (mean `r Table2 %>% filter(dataset == "blue tit", variable == "fixed", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "blue tit", variable == "fixed", subset_name == "effects") %>% round_pluck("sd")` $\text{SD}$, range: `r Table2 %>% filter(dataset == "blue tit", variable == "fixed", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "blue tit", variable == "fixed", subset_name == "effects") %>% pluck("max")`) as *Eucalyptus* models (mean `r Table2 %>% filter(dataset == "eucalyptus", variable == "fixed", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "eucalyptus", variable == "fixed", subset_name == "effects") %>% round_pluck("sd")` $\text{SD}$, range: `r Table2 %>% filter(dataset == "eucalyptus", variable == "fixed", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "eucalyptus", variable == "fixed", subset_name == "effects") %>% pluck("max")`), but the standard deviation in number of fixed effects was somewhat larger in the *Eucalyptus* models. 
+The average number of interaction terms was much larger for the blue tit models (mean `r Table2 %>% filter(dataset == "blue tit", variable == "interactions", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "blue tit", variable == "interactions", subset_name == "effects") %>% round_pluck("sd")` $\text{SD}$, range: `r Table2 %>% filter(dataset == "blue tit", variable == "interactions", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "blue tit", variable == "interactions", subset_name == "effects") %>% pluck("max")`) than for the *Eucalyptus* models (mean `r Table2 %>% filter(dataset == "eucalyptus", variable == "interactions", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "eucalyptus", variable == "interactions", subset_name == "effects") %>% round_pluck("sd")` $\text{SD}$, range: `r Table2 %>% filter(dataset == "eucalyptus", variable == "interactions", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "eucalyptus", variable == "interactions", subset_name == "effects") %>% pluck("max")`), but still under 0.5 for both, indicating that most models did not contain interaction terms. 
+Blue tit models also contained more random effects (mean `r Table2 %>% filter(dataset == "blue tit", variable == "random", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "blue tit", variable == "random", subset_name == "effects") %>% round_pluck("sd")` $\text{SD}$, range: `r Table2 %>% filter(dataset == "blue tit", variable == "random", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "blue tit", variable == "random", subset_name == "effects") %>% pluck("max")`) than *Eucalyptus* models (mean `r Table2 %>% filter(dataset == "eucalyptus", variable == "random", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "eucalyptus", variable == "random", subset_name == "effects") %>% round_pluck("sd")` $\text{SD}$, range: `r Table2 %>% filter(dataset == "eucalyptus", variable == "random", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "eucalyptus", variable == "random", subset_name == "effects") %>% pluck("max")`). 
+The maximum possible sample size in the blue tit dataset (3720 nestlings) was an order of magnitude larger than the maximum possible in the *Eucalyptus* dataset (`r Table2 %>% filter(dataset == "eucalyptus", variable == "samplesize", subset_name == "effects") %>% pluck("max")` plots), and the means and standard deviations of the sample size used to derive the effects eligible for our study were also an order of magnitude greater for the blue tit dataset (mean `r Table2 %>% filter(dataset == "blue tit", variable == "samplesize", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "blue tit", variable == "samplesize", subset_name == "effects") %>% round_pluck("sd")` $\text{SD}$, range: `r Table2 %>% filter(dataset == "blue tit", variable == "samplesize", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "blue tit", variable == "samplesize", subset_name == "effects") %>% pluck("max")`) relative to the *Eucalyptus* models (mean `r Table2 %>% filter(dataset == "eucalyptus", variable == "samplesize", subset_name == "effects") %>% round_pluck("mean")` $\pm$ `r Table2 %>% filter(dataset == "eucalyptus", variable == "samplesize", subset_name == "effects") %>% round_pluck("sd")` $\text{SD}$, range: `r Table2 %>% filter(dataset == "eucalyptus", variable == "samplesize", subset_name == "effects") %>% pluck("min")` to `r Table2 %>% filter(dataset == "eucalyptus", variable == "samplesize", subset_name == "effects") %>% pluck("max")`). However, the standard deviation in sample size from the *Eucalyptus* models was heavily influenced by a few cases of dramatic sub-setting (described below). Approximately three quarters of *Eucalyptus* models used sample sizes within 3$\%$ of the maximum. In contrast, fewer than 20$\%$ of blue tit models relied on sample sizes within 3$\%$ of the maximum, and approximately 50$\%$ of blue tit models relied on sample sizes 29$\%$ or more below the maximum. 
@@ -2569,14 +2592,14 @@ Table4 %>% rename_with(~ str_replace(., "none", "None"), starts_with("none_")) %>% rename_with(~ str_replace(., "pos", "Positive"), starts_with("pos_")) %>% ungroup %>% - gt::gt() %>% + gt::gt() %>% gt::cols_label_with(fn = Hmisc::capitalize) %>% gt::cols_label_with(fn = ~ str_replace(.,"_", " ")) %>% gt::opt_stylize(style = 6, color = "gray") %>% gt::fmt(columns = "dataset",rows = str_detect(dataset, "eucalyptus"), fns = Hmisc::capitalize) %>% gt::tab_style(locations = cells_body(rows = str_detect(dataset, "eucalyptus"), columns = dataset), - style = cell_text(style = "italic")) + style = cell_text(style = "italic")) ``` ## Distribution of Effects @@ -2584,40 +2607,34 @@ Table4 %>% ### Standardized Effect Sizes ($Z_r$) ```{r calc_MA_mod_coefs, eval=TRUE,include=TRUE, echo = FALSE} -coefs_MA_mod <- bind_rows( ManyEcoEvo_viz %>% - filter(model_name == "MA_mod") %>% - filter(exclusion_set == "complete"), - ManyEcoEvo_viz %>% - filter(model_name == "MA_mod") %>% - filter(exclusion_set == "complete-rm_outliers")) %>% +coefs_MA_mod <- ManyEcoEvo_viz %>% + filter(if_all(ends_with("subset"), .fns = ~ .x == "All"), + exclusion_set %in% c("complete", "complete-rm_outliers"), + model_name == "MA_mod") %>% hoist(tidy_mod_summary) %>% select(-starts_with("mod"), -ends_with("plot"), -estimate_type) %>% - unnest(cols = c(tidy_mod_summary)) + unnest(cols = c(tidy_mod_summary)) %>% + filter(type == "summary") ``` ```{r inline-text-Zr-data, message=FALSE, echo=FALSE} bt_complete_data <- ManyEcoEvo_viz %>% - filter(exclusion_set == "complete", - estimate_type == "Zr", + filter(!!!filter_vars_main_analysis, model_name == "MA_mod", - dataset == "blue tit", - publishable_subset == "All", - expertise_subset == "All") %>% - select(model) %>% - mutate(plot_data = map(model, - .f = ~ broom::tidy(.x, - conf.int = TRUE, - include_studies = TRUE) %>% - dplyr::mutate(point_shape = - ifelse(stringr::str_detect(term, "overall"), - "diamond", - "circle"), - Parameter = - 
forcats::fct_reorder(term, - estimate) %>% - forcats::fct_reorder(., - point_shape, - .desc = TRUE)) + dataset == "blue tit" + ) %>% + select(tidy_mod_summary) %>% + mutate(plot_data = map(tidy_mod_summary, + .f = ~ dplyr::mutate(.x, point_shape = + ifelse(stringr::str_detect(term, "overall"), + "diamond", + "circle"), + Parameter = + forcats::fct_reorder(term, + estimate) %>% + forcats::fct_reorder(., + point_shape, + .desc = TRUE)) ), meta_analytic_mean = map_dbl(plot_data, ~ filter(.x, Parameter == "overall") %>% @@ -2627,41 +2644,24 @@ bt_complete_data <- ManyEcoEvo_viz %>% mutate(parameter_type = case_when(str_detect(Parameter, "overall") ~ "mean", TRUE ~ "study")) -# bt_complete_data <- -# bt_complete_data %>% -# rename(id_col = term) %>% -# group_by(type) %>% -# group_split() %>% -# set_names(., bt_complete_data$type %>% unique) %>% -# map_if(.x = ., names(.) == "study", -# .f = ~ anonymise_teams(.x, TeamIdentifier_lookup)) %>% -# bind_rows() %>% -# rename(term = id_col) - - complete_euc_data <- ManyEcoEvo_viz %>% - filter(exclusion_set == "complete", - estimate_type == "Zr", + filter(!!!filter_vars_main_analysis, model_name == "MA_mod", - dataset == "eucalyptus", - publishable_subset == "All", - expertise_subset == "All") %>% - select(model) %>% - mutate(plot_data = map(model, - .f = ~ broom::tidy(.x, - conf.int = TRUE, - include_studies = TRUE) %>% - dplyr::mutate(point_shape = - ifelse(stringr::str_detect(term, "overall"), - "diamond", - "circle"), - Parameter = - forcats::fct_reorder(term, - estimate) %>% - forcats::fct_reorder(., - point_shape, - .desc = TRUE)) + dataset == "eucalyptus") %>% + select(tidy_mod_summary) %>% + mutate(plot_data = map(tidy_mod_summary, + .f = ~ dplyr::mutate(.x, + point_shape = + ifelse(stringr::str_detect(term, "overall"), + "diamond", + "circle"), + Parameter = + forcats::fct_reorder(term, + estimate) %>% + forcats::fct_reorder(., + point_shape, + .desc = TRUE)) ), meta_analytic_mean = map_dbl(plot_data, ~ filter(.x, 
Parameter == "overall") %>% @@ -2670,27 +2670,18 @@ complete_euc_data <- unnest(cols = c("plot_data")) %>% mutate(parameter_type = case_when(str_detect(Parameter, "overall") ~ "mean", TRUE ~ "study")) -# -# complete_euc_data <- -# complete_euc_data %>% -# rename(id_col = term) %>% -# group_by(type) %>% -# group_split() %>% -# set_names(., complete_euc_data$type %>% unique) %>% -# map_if(.x = ., names(.) == "study", -# .f = ~ anonymise_teams(.x, TeamIdentifier_lookup)) %>% -# bind_rows() %>% -# rename(term = id_col) #find the second smallest - small2 <- function(x) { - u <- unique(x) -sort(u, decreasing = FALSE)[2L]} - +small2 <- function(x) { + u <- unique(x) + sort(u, decreasing = FALSE)[2L] +} + #find the second largest - large2 <- function(x) { - u <- unique(x) -sort(u, decreasing = TRUE)[2L]} +large2 <- function(x) { + u <- unique(x) + sort(u, decreasing = TRUE)[2L] +} ``` Although the majority (`r bt_complete_data %>% filter(estimate < 0, type == "study") %>% nrow()` of `r filter(Table1, dataset == "blue tit", subset_name == "effects") %>% pluck("totalanalyses")`) of the usable $Z_r$ effects from the blue tit dataset found nestling growth decreased with sibling competition, and the meta-analytic mean $\bar{Z_r}$ (Fisher's transformation of the correlation coefficient) was convincingly negative (`r filter(coefs_MA_mod, dataset == "blue tit", !!!filter_vars_main_no_est) %>% round_pluck("estimate")` $\pm$ `r filter(coefs_MA_mod, dataset == "blue tit", !!!filter_vars_main_no_est) %>% mutate(interval = estimate - conf.low) %>% round_pluck("interval")` 95$\%$CI), there was substantial variability in the strength and the direction of this effect. 
@@ -2704,10 +2695,8 @@ Most values of $Z_r$ were relatively small with values $\lt |0.2|$ and the meta- Of the `r Table1 %>% filter(subset_name == "effects", dataset == "eucalyptus") %>% pluck("totalanalyses")` effects, `r complete_euc_data %>% filter(estimate >0 & conf.low <= 0 | estimate <0 & conf.high >= 0, type == "study") %>% nrow() %>% xfun::numbers_to_words()` had confidence intervals overlapping zero, approximately a quarter (`r complete_euc_data %>% filter(estimate < 0, conf.high < 0, type == "study") %>% nrow() %>% xfun::numbers_to_words()`) crossed the traditional threshold of statistical significance indicating a negative relationship between grass cover and seedling success, and `r complete_euc_data %>% filter(estimate > 0 & conf.low > 0, type == "study") %>% nrow() %>% xfun::numbers_to_words()` crossed the significance threshold indicating a positive relationship between grass cover and seedling success (@fig-forest-plots-Zr-2). ```{r plot_forest_fn, echo = FALSE, message = FALSE} - - plot_forest <- function(data, intercept = TRUE, MA_mean = TRUE){ - if (MA_mean == FALSE){ + if (MA_mean == FALSE) { data <- filter(data, Parameter != "overall") } @@ -2743,6 +2732,7 @@ plot_forest <- function(data, intercept = TRUE, MA_mean = TRUE){ } return(p) + } ``` @@ -2776,7 +2766,7 @@ bt_forest <- bt_complete_data %>% euc_forest <- complete_euc_data %>% filter(estimate > -2) %>% - arrange(desc(type)) %>% + arrange(desc(type)) %>% mutate(type = forcats::as_factor(type)) %>% group_by(type) %>% arrange(desc(estimate),.by_group = TRUE) %>% @@ -2784,10 +2774,10 @@ euc_forest <- complete_euc_data %>% point_shape = case_when(str_detect(type, "summary") ~ "mean", TRUE ~ "study")) %>% plot_forest(intercept = TRUE, MA_mean = TRUE) + - theme(axis.text.x = element_text(size = 15), + theme(axis.text.x = element_text(size = 15), axis.title.x = element_text(size = 15), axis.text.y = element_blank() - ) + + ) + scale_y_continuous(limits = c(-1.6, 0.65)) bt_forest @@ -2798,58 
+2788,6 @@ euc_forest ### Out-of-sample predictions $y_{i}$ ```{r define-plot-forest-function, echo = FALSE, message=FALSE, warning=FALSE} -plot_forest <- function(data, intercept = TRUE, MA_mean = TRUE){ - if(MA_mean == FALSE){ - data <- filter(data, study_id != "overall") - } - - data <- data %>% - group_by(study_id) %>% - group_nest() %>% - hoist(data, "estimate",.remove = FALSE) %>% - hoist(estimate, y50 = 2) %>% - select(-estimate) %>% - unnest(data) %>% - arrange(desc(type)) %>% - mutate(type = forcats::as_factor(type)) %>% - group_by(type) %>% - arrange(desc(y50),.by_group = TRUE) %>% - mutate(study_id = forcats::as_factor(study_id), - point_shape = case_when(str_detect(type, "summary") ~ "mean", - TRUE ~ "study")) - - p <- ggplot(data, aes(y = estimate, - x = study_id, - ymin = conf.low, - ymax = conf.high, - shape = point_shape, - colour = estimate_type - )) + - geom_pointrange(position = position_jitter(width = 0.1)) + - ggforestplot::theme_forest() + - theme(axis.line = element_line(linewidth = 0.10, colour = "black"), - # axis.line.y = element_blank(), - text = element_text(family = "Helvetica")) + - guides(shape = guide_legend("Estimate Type"), colour = guide_legend("Prediction Scenario")) + - coord_flip() + - labs(y = "Standardised Out of Sample Predictions, Z", - x = element_blank()) + - scale_y_continuous(breaks = seq(from = round(min(data$conf.low)), to = round(max(data$conf.high)), by = 1), - minor_breaks = seq(from = -4.5, to = 1.5, by = 0.5)) + - NatParksPalettes::scale_color_natparks_d("Glacier") - - if(intercept == TRUE){ - p <- p + geom_hline(yintercept = 0) - } - if(MA_mean == TRUE){ - # p <- p + geom_hline(aes(yintercept = meta_analytic_mean), - # data = data, - # colour = "#01353D", - # linetype = "dashed") - } - - print(p) -} plot_forest_2 <- function(data, intercept = TRUE, MA_mean = TRUE, y_zoom = numeric(2L)){ if(MA_mean == FALSE){ @@ -2872,13 +2810,13 @@ plot_forest_2 <- function(data, intercept = TRUE, MA_mean = TRUE, y_zoom = numer 
TRUE ~ "study")) p <- ggplot(plot_data, aes(y = estimate, - x = study_id, - ymin = conf.low, - ymax = conf.high, - # shape = type, - shape = point_shape, - colour = estimate_type - )) + + x = study_id, + ymin = conf.low, + ymax = conf.high, + # shape = type, + shape = point_shape, + colour = estimate_type + )) + geom_pointrange(position = position_dodge(width = 0.5)) + ggforestplot::theme_forest() + theme(axis.line = element_line(linewidth = 0.10, colour = "black"), @@ -2919,131 +2857,77 @@ plot_forest_2 <- function(data, intercept = TRUE, MA_mean = TRUE, y_zoom = numer print(p) } +# ---- new code ---- + +MA_yi_summary_stats <- + bind_rows( + {ManyEcoEvo_yi_viz %>% + filter(dataset == "eucalyptus", model_name == "MA_mod") %>% + unnest(cols = tidy_mod_summary) %>% + mutate(response_scale = list(log_back(estimate, std.error, 1000)), + .by = c(dataset, estimate_type, term, type), + .keep = "used") %>% + select(-estimate, -std.error) %>% + unnest_wider(response_scale) %>% + rename(estimate = mean_origin, conf.low = lower, conf.high = upper) %>% + nest(tidy_mod_summary = c(-dataset, -estimate_type))}, + {ManyEcoEvo_yi_viz %>% + filter(dataset == "blue tit", + model_name == "MA_mod") %>% + select(estimate_type, dataset, tidy_mod_summary)} + ) %>% + mutate(MA_mean = map(tidy_mod_summary, filter, type == "summary")) %>% + hoist(MA_mean, + mean = "estimate", + MA_conf.low = "conf.low", + MA_conf.high = "conf.high") %>% + mutate(max_min_est = + map(tidy_mod_summary, ~ filter(.x, type == "study") %>% + summarise(max_est = max(estimate), + min_est = min(estimate))), + max_min_CI = + map(tidy_mod_summary, ~ filter(.x, type == "study") %>% + summarise(max_upper_CI = max(conf.high), + min_lower_CI = min(conf.low)))) %>% + unnest_wider(col = c(max_min_est, max_min_CI)) %>% + select(-MA_mean) %>% + rename(MA_mean = mean) -fit_MA_mv <- function(effects_analysis, Z_colname, VZ_colname, estimate_type){ - Zr <- effects_analysis %>% pull({{Z_colname}}) - VZr <- effects_analysis %>% 
pull({{VZ_colname}}) - mod <- ManyEcoEvo::fit_metafor_mv(estimate = Zr, - variance = VZr, - estimate_type = estimate_type, - data = effects_analysis) - return(mod) -} - -back_transformed_predictions <- - ManyEcoEvo_yi %>% - dplyr::mutate(data = - purrr::map(data, - ~ dplyr::filter(.x, - stringr::str_detect(response_variable_type, "constructed", negate = TRUE)))) %>% - prepare_response_variables_yi(estimate_type = "yi", - param_table = ManyEcoEvo:::analysis_data_param_tables) %>% - generate_yi_subsets() - - -raw_mod_data_logged <- - back_transformed_predictions %>% - filter(dataset == "eucalyptus") %>% - group_by(estimate_type) %>% - select(estimate_type, data) %>% - unnest(data) %>% - rename(study_id = id_col) %>% - hoist(params, param_mean = list("value", 1), param_sd = list("value", 2)) %>% - rowwise() %>% - mutate(exclusion_threshold = param_mean + 3*param_sd) %>% - filter(fit < exclusion_threshold) %>% - mutate(log_vals = map2(fit, se.fit, log_transform, 1000)) %>% - unnest(log_vals) %>% - select(study_id, - TeamIdentifier, - estimate_type, - starts_with("response_"), - -response_id_S2, - ends_with("_log")) %>% - group_by(estimate_type) %>% - nest() - +eucalyptus_yi_plot_data <- MA_yi_summary_stats %>% #extract euc data for plotting (on count scale, not log scale) + filter(dataset == "eucalyptus") %>% + select(dataset, estimate_type, tidy_mod_summary) -mod_data_logged <- raw_mod_data_logged %>% - mutate(MA_mod = - map(data, - ~fit_MA_mv(.x, mean_log, std.error_log, "yi"))) - - -plot_data_logged <- mod_data_logged %>% - mutate(tidy_mod = map(.x = MA_mod, - ~broom::tidy(.x, - conf.int = TRUE, - include_studies = TRUE) %>% - rename(study_id = term))) %>% - select(tidy_mod) %>% - unnest(cols = c(tidy_mod)) - - -MA_yi_summary_stats <- # ALL ON logged RESPONSE SCALE for EUC, standardized response values for BT - plot_data_logged %>% - mutate(response_scale = map2(estimate, std.error, log_back, 100)) %>% - select(estimate_type, study_id, type, response_scale) %>% - 
unnest(response_scale) %>% - rename(estimate = mean_origin, conf.low = lower, conf.high = upper) %>% - nest(tidy_mod = -estimate_type) %>% - mutate(dataset = "eucalyptus") %>% - bind_rows({ - ManyEcoEvo_yi_results %>% - ungroup() %>% - filter(exclusion_set == "complete", dataset == "blue tit") %>% - select(dataset, estimate_type, MA_mod, effects_analysis, -exclusion_set) %>% - group_by(estimate_type, dataset) %>% - transmute(tidy_mod = map(.x = MA_mod, - ~broom::tidy(.x, - conf.int = TRUE, - include_studies = TRUE) %>% - rename(study_id = term))) - }) %>% - mutate(MA_mean = map(tidy_mod, filter, type == "summary")) %>% - hoist(MA_mean, - mean = "estimate", - MA_conf.low = "conf.low", - MA_conf.high = "conf.high") %>% - mutate(max_min_est = map(tidy_mod, - ~ filter(.x, type == "study") %>% - summarise(max_est = max(estimate), - min_est = min(estimate)))) %>% - mutate(max_min_CI = map(tidy_mod, - ~ filter(.x, type == "study") %>% - summarise(max_upper_CI = max(conf.high), - min_lower_CI = min(conf.low)))) %>% - unnest_wider(col = c(max_min_est, max_min_CI)) %>% - ungroup %>% - rows_update({plot_data_logged %>% #hells yes to this gem of a function! 
- mutate(dataset = "eucalyptus") %>% - filter(type != "summary") %>% - nest(tidy_mod = c(-estimate_type, -dataset))}, - by = c("dataset", "estimate_type")) %>% +MA_yi_summary_stats <- + MA_yi_summary_stats %>% + left_join(., {MA_yi_summary_stats %>% + select(dataset, estimate_type, tidy_mod_summary) %>% + filter(dataset == "blue tit") %>% mutate(no_effect = - map_int(tidy_mod, - ~ filter(.x, - estimate >0 & conf.low <= 0 | estimate <0 & conf.high >= 0, - type == "study") %>% - nrow() ), - pos_sign = - map_int(tidy_mod, - ~ filter(.x, estimate >0, conf.low > 0, - type == "study") %>% - nrow()), - neg_sign = - map_int(tidy_mod, - ~ filter(.x, estimate < 0, conf.high < 0, - type == "study") %>% - nrow()), + map_int(tidy_mod_summary, + ~ filter(.x, + estimate >0 & conf.low <= 0 | estimate <0 & conf.high >= 0, + type == "study") %>% + nrow() ), + pos_sign = + map_int(tidy_mod_summary, + ~ filter(.x, estimate >0, conf.low > 0, + type == "study") %>% + nrow()), + neg_sign = + map_int(tidy_mod_summary, + ~ filter(.x, estimate < 0, conf.high < 0, + type == "study") %>% + nrow()), total_effects = - map_int(tidy_mod, + map_int(tidy_mod_summary, ~ filter(.x, type == "study") %>% - nrow() - )) %>% - select(-tidy_mod, -MA_mean) %>% - rename(MA_mean = mean) + nrow() + ), + .by = c("dataset", "estimate_type"), + .keep = "none")}, + by = join_by(dataset, estimate_type)) %>% + select(-tidy_mod_summary) ``` As with the effect size $Z_r$, we observed substantial variability in the size of out-of-sample predictions derived from the analysts' models. 
@@ -3064,25 +2948,70 @@ The meta-analytic mean predictions for these three scenarios were similar; `r fi #| echo: false #| fig-keep: last -yi_plot_data_bt <- - ManyEcoEvo_yi_results %>% - filter(exclusion_set == "complete", dataset == "blue tit") %>% - select(MA_mod, -exclusion_set) %>% - group_by(estimate_type, dataset) %>% - transmute(tidy_mod = map(MA_mod, - ~ broom::tidy(.x, conf.int = TRUE, include_studies = TRUE) %>% - rename(id_col = term))) %>% - unnest(tidy_mod) +plot_forest_yi <- function(data, intercept = TRUE, MA_mean = TRUE){ + if (MA_mean == FALSE){ + data <- filter(data, study_id != "overall") + } -yi_plot_data_bt %>% - # group_by(type) %>% - # group_split() %>% - # set_names(., yi_plot_data_bt$type %>% unique) %>% - # map_if(.x = ., names(.) == "study", - # .f = ~ anonymise_teams(.x, TeamIdentifier_lookup)) %>% - # bind_rows() %>% - rename(study_id = id_col) %>% - plot_forest(., intercept = TRUE, MA_mean = TRUE) + + data <- data %>% + filter(!is.na(type))%>% + group_by(study_id) %>% + group_nest() %>% + hoist(data, "estimate",.remove = FALSE) %>% + hoist(estimate, y50 = 2) %>% + select(-estimate) %>% + unnest(data) %>% + arrange(desc(type)) %>% + mutate(type = forcats::as_factor(type)) %>% + group_by(type) %>% + arrange(desc(y50),.by_group = TRUE) %>% + mutate(study_id = forcats::as_factor(study_id), + point_shape = case_when(str_detect(type, "summary") ~ "mean", + TRUE ~ "study")) + + p <- ggplot(data, aes(y = estimate, + x = study_id, + ymin = conf.low, + ymax = conf.high, + shape = point_shape, + colour = estimate_type + )) + + geom_pointrange(position = position_jitter(width = 0.1)) + + ggforestplot::theme_forest() + + theme(axis.line = element_line(linewidth = 0.10, colour = "black"), + # axis.line.y = element_blank(), + text = element_text(family = "Helvetica")) + + guides(shape = guide_legend("Estimate Type"), + colour = guide_legend("Prediction Scenario")) + + coord_flip() + + labs(y = "Standardised Out of Sample Predictions, Z", + x = 
element_blank()) + + scale_y_continuous(breaks = seq(from = round(min(data$conf.low,na.rm = TRUE)), + to = round(max(data$conf.high, na.rm = TRUE)), by = 1), + minor_breaks = seq(from = -4.5, to = 1.5, by = 0.5)) + + NatParksPalettes::scale_color_natparks_d("Glacier") + + if (intercept == TRUE) { + p <- p + geom_hline(yintercept = 0) + } + if (MA_mean == TRUE) { + # p <- p + geom_hline(aes(yintercept = meta_analytic_mean), + # data = data, + # colour = "#01353D", + # linetype = "dashed") + } + + print(p) +} + +ManyEcoEvo_yi_viz %>% + filter(dataset == "blue tit") %>% + group_by(estimate_type, dataset) %>% + select(estimate_type, dataset, tidy_mod_summary) %>% + unnest(cols = tidy_mod_summary) %>% + rename(study_id = term) %>% + ungroup %>% + plot_forest_yi(., intercept = TRUE, MA_mean = TRUE) + theme(axis.text.y = element_blank()) ``` @@ -3094,26 +3023,26 @@ yi_plot_data_bt %>% #| message: false #| fig-keep: last -plot_data_logged %>% - mutate(response_scale = map2(estimate, std.error, log_back, 1000)) %>% - select(estimate_type, study_id, type, response_scale) %>% - unnest(response_scale) %>% - rename(estimate = mean_origin, conf.low = lower, conf.high = upper) %>% -# filter(estimate <1000) %>% - plot_forest_2(MA_mean = T,y_zoom = c(0,40)) + - theme(axis.text.y = element_blank()) +eucalyptus_yi_plot_data %>% + group_by(estimate_type, dataset) %>% + select(estimate_type, dataset, tidy_mod_summary) %>% + unnest(cols = tidy_mod_summary) %>% + rename(study_id = term) %>% + ungroup %>% + plot_forest_2(MA_mean = T,y_zoom = c(0,150)) + + theme(axis.text.y = element_blank()) ``` ## Quantifying Heterogeneity ### Effect Sizes ($Z_r$) -```{r zr heterogeneity values, echo = FALSE} +```{r zr_heterogeneity_values, echo = FALSE} zr_heterogeneity <- ManyEcoEvo_viz %>% - dplyr::filter(estimate_type == "Zr", - model_name == "MA_mod", - exclusion_set == "complete", - expertise_subset == "All") %>% + dplyr::filter(estimate_type == "Zr", + model_name == "MA_mod", + exclusion_set == 
"complete", + expertise_subset == "All") %>% bind_rows(ManyEcoEvo_viz %>% dplyr::filter(exclusion_set == "complete-rm_outliers", estimate_type == "Zr", @@ -3151,8 +3080,8 @@ zr_heterogeneity <- ManyEcoEvo_viz %>% publishable_subset, expertise_subset, collinearity_subset)) %>% - mutate(publishable_subset = - case_when(publishable_subset == "All" & expertise_subset == "All" ~ + mutate(publishable_subset = + case_when(publishable_subset == "All" & expertise_subset == "All" & collinearity_subset == "All"~ "All analyses", publishable_subset == "data_flawed" & expertise_subset == "All" ~ "Analyses receiving at least one 'Unpublishable' rating removed", @@ -3163,10 +3092,9 @@ zr_heterogeneity <- ManyEcoEvo_viz %>% TRUE ~ ""), exclusion_set = case_when(exclusion_set == "complete" ~ "", - TRUE ~ ", Outliers removed"), - collinearity_subset = dplyr::na_if(collinearity_subset, "All")) %>% + TRUE ~ ", Outliers removed")) %>% ungroup %>% - select(-expertise_subset) %>% + select(-expertise_subset, -collinearity_subset) %>% unite("data_subset", publishable_subset, exclusion_set, sep = "", remove = TRUE) %>% group_by(data_subset) %>% arrange(data_subset, dataset) %>% @@ -3177,14 +3105,14 @@ zr_heterogeneity <- ManyEcoEvo_viz %>% mutate(tau_total = sum(sigma2_1, sigma2_2)) %>% relocate(tau_total, .after = starts_with("sigma")) -bt_MA_summ_stats <- zr_heterogeneity %>% filter(dataset == "blue tit", data_subset == "All analyses", is.na(collinearity_subset)) -euc_MA_summ_stats <- zr_heterogeneity %>% filter(dataset == "eucalyptus", data_subset == "All analyses", is.na(collinearity_subset)) +bt_MA_summ_stats <- zr_heterogeneity %>% filter(dataset == "blue tit", data_subset == "All analyses") +euc_MA_summ_stats <- zr_heterogeneity %>% filter(dataset == "eucalyptus", data_subset == "All analyses") ``` We quantified both absolute ($\tau^{2}$) and relative ($I^{2}$) heterogeneity resulting from analytical variation. 
Both measures suggest that substantial variability among effect sizes was attributable to the analytical decisions of analysts. -The total absolute level of variance beyond what would typically be expected due to sampling error, $\tau^{2}$ (@tbl-effects-heterogeneity), among all usable blue tit effects was `r round(bt_MA_summ_stats$tau_total, digits = 2)` and for *Eucalyptus* effects was `r round(euc_MA_summ_stats$tau_total, digits = 2)`. +The total absolute level of variance beyond what would typically be expected due to sampling error, $\tau^{2}$ (@tbl-effects-heterogeneity), among all usable blue tit effects was `r round_pluck(bt_MA_summ_stats, "tau_total")` and for *Eucalyptus* effects was `r round_pluck(euc_MA_summ_stats, "tau_total")`. This is similar to or exceeding the median value (0.105) of $\tau^{2}$ found across 31 recent meta-analyses [calculated from the data in @yang2023]. The similarity of our observed values to values from meta-analyses of different studies based on different data suggest the potential for a large portion of heterogeneity to arise from analytical decisions. For further discussion of interpretation of $\tau^{2}$ in our study, please consult discussion of *post hoc* analyses below. 
@@ -3200,63 +3128,55 @@ zr_heterogeneity %>% group_by( data_subset) %>% arrange(dataset) %>% dplyr::relocate(tau_total, .after = data_subset) %>% + dplyr::relocate("Nobs", .after = dataset) %>% gt::gt() %>% - gt::sub_missing(columns = "collinearity_subset", rows = everything(), "") %>% - gt::fmt("collinearity_subset", fns = function(x) str_replace(x, "_", " ") %>% - str_c(", ", .)) %>% gt::fmt_number(columns = c(-"dataset", -"dataset", -"data_subset", -starts_with("I2"), -Nobs), decimals = 2) %>% - gt::fmt_percent(columns = "I2_Total", scale_values = FALSE, decimals = 3) %>% + gt::fmt_percent(columns = "I2_Total", scale_values = FALSE, decimals = 2) %>% gt::fmt_percent(columns = starts_with("I2_TeamIdentifier"), scale_values = FALSE, decimals = 2) %>% gt::opt_stylize(style = 6, color = "gray") %>% - gt::cols_label(dataset="Dataset", - I2_Total = "$${I^2}_\\text{Total}$$", - I2_TeamIdentifier = "$${I^2}_\\text{Team}$$", - `I2_TeamIdentifier/study_id` = "$${I^2}_\\text{Team, effectID}$$" , - sigma2_1 = "$${\\tau}_\\text{Team}^{2}$$", - sigma2_2 = "$${\\tau}_\\text{effectID}^{2}$$", - tau_total = "$${\\tau}_\\text{Total}^{2}$$", - Nobs = "N.Obs" + gt::cols_label(dataset= gt::md("Dataset"), + I2_Total = gt::md("$${I^2}_\\text{Total}$$"), + I2_TeamIdentifier = gt::md("$${I^2}_\\text{Team}$$"), + `I2_TeamIdentifier/study_id` = gt::md("$${I^2}_\\text{Team, effectID}$$"), + sigma2_1 = gt::md("$${\\tau}_\\text{Team}^{2}$$"), + sigma2_2 = gt::md("$${\\tau}_\\text{effectID}^{2}$$"), + tau_total = gt::md("$${\\tau}_\\text{Total}^{2}$$"), + Nobs = gt::md("$${N}_\\text{Obs}$$") ) %>% - gt::cols_merge(columns = c(dataset, collinearity_subset), - hide_columns =c(collinearity_subset)) %>% gt::row_group_order(groups = c("All analyses", + "Blue tit analyses containing highly collinear predictors removed", "All analyses, Outliers removed", "Analyses receiving at least one 'Unpublishable' rating removed", "Analyses receiving at least one 'Unpublishable' and or 'Major Revisions' rating 
removed", "Analyses from teams with highly proficient or expert data analysts")) %>% gt::tab_style(locations = cells_body(rows = str_detect(dataset, "Eucalyptus"), columns = dataset), - style = cell_text(style = "italic")) %>% - gt::as_raw_html() + style = cell_text(style = "italic")) ``` In our analyses, $I^{2}$ is a plausible index of how much more variability among effect sizes we have observed, as a proportion, than we would have observed if sampling error were driving variability. We discuss our interpretation of $I^{2}$ further in the methods, but in short, it is a useful metric for comparison to values from published meta-analyses and provides a plausible value for how much heterogeneity could arise in a normal meta-analysis with similar sample sizes due to analytical variability alone. -In our study, total $I^{2}$ for the blue tit $Z_r$ estimates was extremely large, at `r ManyEcoEvo_viz %>% dplyr::filter(model_name == "MA_mod", dataset == "blue tit", !!!filter_vars_main_analysis) %>% pluck("MA_fit_stats") %>% flatten_df() %>% pluck("I2_Total") %>% round(2)`%, as was the *Eucalyptus* estimate (`r ManyEcoEvo_viz %>% dplyr::filter(model_name == "MA_mod", dataset == "eucalyptus", !!!filter_vars_main_analysis) %>% pluck("MA_fit_stats") %>% flatten_df() %>% pluck("I2_Total") %>% round(2)`% @tbl-effects-heterogeneity). +In our study, total $I^{2}$ for the blue tit $Z_r$ estimates was extremely large, at `r ManyEcoEvo_viz %>% dplyr::filter(model_name == "MA_mod", dataset == "blue tit", !!!filter_vars_main_analysis) %>% hoist(MA_fit_stats, "I2_Total") %>% round_pluck("I2_Total")`%, as was the *Eucalyptus* estimate (`r ManyEcoEvo_viz %>% dplyr::filter(model_name == "MA_mod", dataset == "eucalyptus", !!!filter_vars_main_analysis) %>% hoist(MA_fit_stats, "I2_Total") %>% round_pluck("I2_Total") `% @tbl-effects-heterogeneity). 
Although the overall $I^{2}$ values were similar for both *Eucalyptus* and blue tit analyses, the relative composition of that heterogeneity differed. -For both datasets, the majority of heterogeneity in $Z_r$ was driven by differences among effects as opposed to differences among teams, though this was more prominent for the *Eucalyptus* dataset, where nearly all of the total heterogeneity was driven by differences among effects (`r ManyEcoEvo_viz %>% dplyr::filter(model_name == "MA_mod", dataset == "eucalyptus", !!!filter_vars_main_analysis) %>% pluck("MA_fit_stats") %>% flatten_df() %>% pluck("I2_TeamIdentifier/study_id") %>% round(2)`%) as opposed to differences among teams (`r ManyEcoEvo_viz %>% dplyr::filter(model_name == "MA_mod", dataset == "eucalyptus", !!!filter_vars_main_analysis) %>% pluck("MA_fit_stats") %>% flatten_df() %>% pluck("I2_TeamIdentifier") %>% round(2)`%) (@tbl-effects-heterogeneity). +For both datasets, the majority of heterogeneity in $Z_r$ was driven by differences among effects as opposed to differences among teams, though this was more prominent for the *Eucalyptus* dataset, where nearly all of the total heterogeneity was driven by differences among effects (`r ManyEcoEvo_viz %>% dplyr::filter(model_name == "MA_mod", dataset == "eucalyptus", !!!filter_vars_main_analysis) %>% hoist(MA_fit_stats, "I2_TeamIdentifier/study_id") %>% round_pluck("I2_TeamIdentifier/study_id") `%) as opposed to differences among teams (`r ManyEcoEvo_viz %>% dplyr::filter(model_name == "MA_mod", dataset == "eucalyptus", !!!filter_vars_main_analysis) %>% hoist(MA_fit_stats, "I2_TeamIdentifier") %>% round_pluck("I2_TeamIdentifier")`%) (@tbl-effects-heterogeneity). 
### Out-of-sample predictions ($y_{i}$) ```{r heterogeneity-reporting-data, echo = FALSE, warning=FALSE, message = FALSE} -heterogeneity_data_yi <- ManyEcoEvo_yi_results %>% - filter(exclusion_set == "complete", dataset == "blue tit") %>% - group_by(estimate_type, dataset) %>% - select(MA_mod, -effects_analysis, -exclusion_set) %>% - bind_rows(., {mod_data_logged %>% select(-data) %>% mutate(dataset = "eucalyptus")}) %>% - transmute(tidy_mod_summary = map(MA_mod, - ~ broom::glance(.x, )), - MA_fit_stats = map(MA_mod, get_MA_fit_stats)) %>% - unnest(c(tidy_mod_summary, MA_fit_stats)) %>% - group_by(dataset) %>% - select(-logLik, -deviance, -tau.squared, -AIC, -BIC, -AICc, -cochran.qm, -p.value.cochran.qm) +heterogeneity_data_yi <- ManyEcoEvo_yi_viz %>% + filter(model_name == "MA_mod") %>% + group_by(dataset, estimate_type) %>% + select(mod_glance, MA_fit_stats) %>% + unnest(cols = c(mod_glance, MA_fit_stats)) %>% + group_by(dataset) %>% + select(-logLik, -deviance, -tau.squared, -AIC, -BIC, -AICc, -cochran.qm, -p.value.cochran.qm, -c(cochran.qe, p.value.cochran.qe, df.residual)) ``` We observed substantial heterogeneity among out-of-sample estimates, but the pattern differed somewhat from the $Z_r$ values (@tbl-yi-heterogeneity). @@ -3268,30 +3188,9 @@ We are limited in our interpretation of $\tau^{2}$ for these estimates because, ```{r tbl-yi-heterogeneity, message = FALSE, results = 'asis', echo = FALSE} #| label: tbl-yi-heterogeneity #| tbl-cap: "Heterogeneity among the out-of-sample predictions ${y}_{i}$ for both blue tit and *Eucalyptus* datasets. ${\\tau}_\\text{Team}^{2}$ is the absolute heterogeneity for the random effect `Team`. ${\\tau}_\\text{effectID}^{2}$ is the absolute heterogeneity for the random effect `effectID` nested under `Team`. `effectID` is the unique identifier assigned to each individual statistical effect submitted by an analysis team. 
We nested `effectID` within analysis team identity (`Team`) because analysis teams often submitted >1 statistical effect, either because they considered >1 model or because they derived >1 effect per model, especially when a model contained a factor with multiple levels that produced >1 contrast. ${\\tau}_\\text{Total}^{2}$ is the total absolute heterogeneity. ${I}_\\text{Total}^{2}$ is the proportional heterogeneity; the proportion of the variance among effects not attributable to sampling error, ${I}_\\text{Team}^{2}$ is the subset of the proportional heterogeneity due to differences among `Teams` and ${I}_\\text{Team,effectID}^{2}$ is subset of the proportional heterogeneity attributable to among-`effectID` differences." -ManyEcoEvo_yi_results %>% - filter(exclusion_set == "complete", - dataset == "blue tit") %>% - group_by(estimate_type, dataset) %>% - select(MA_mod, -effects_analysis, -exclusion_set) %>% - bind_rows(., {mod_data_logged %>% - select(-data) %>% - mutate(dataset = "eucalyptus")}) %>% - transmute(tidy_mod_summary = map(MA_mod, - ~ broom::glance(.x, )), - MA_fit_stats = map(MA_mod, get_MA_fit_stats)) %>% - unnest(c(tidy_mod_summary, MA_fit_stats)) %>% - select(-logLik, - -deviance, - -tau.squared, - -AIC, - -BIC, - -AICc, - -cochran.qm, - -p.value.cochran.qm, - -c(cochran.qe, p.value.cochran.qe, df.residual)) %>% - mutate(tau_total = sum(sigma2_1, sigma2_2), - dataset = case_when(str_detect(dataset, "eucalyptus") ~ "*Eucalyptus*", - TRUE ~ dataset)) %>% +heterogeneity_data_yi %>% mutate(tau_total = sum(sigma2_1, sigma2_2), + dataset = case_when(str_detect(dataset, "eucalyptus") ~ "*Eucalyptus*", + TRUE ~ dataset)) %>% relocate(tau_total, .after = starts_with("nobs")) %>% group_by(dataset) %>% gt::gt() %>% @@ -3300,23 +3199,21 @@ ManyEcoEvo_yi_results %>% -"dataset", -starts_with("I2")), decimals = 2) %>% - gt::fmt(columns = starts_with("p.value"), - fns = function(x) gtsummary::style_pvalue(x)) %>% gt::fmt_percent(columns = "I2_Total", scale_values 
= FALSE, decimals = 3) %>% - gt::fmt_percent(columns = starts_with("I2_"), scale_values = FALSE, decimals = 2) %>% + gt::fmt_percent(columns = starts_with("I2_"), scale_values = FALSE, decimals = 2,drop_trailing_zeros = T, drop_trailing_dec_mark = T) %>% gt::opt_stylize(style = 6, color = "gray") %>% gt::cols_label(dataset = "Dataset", tau_total = gt::md("$${\\tau}_\\text{Total}$$"), I2_Total = gt::md("$${I}_\\text{Total}^{2}$$"), I2_TeamIdentifier = gt::md("$${I}_\\text{Team}^{2}$$"), - `I2_TeamIdentifier/study_id` = gt::md("$${I}_{Team, effectID}^{2}$$") , + `I2_TeamIdentifier/study_id` = gt::md("$${I}_{\\text{Team, effectID}}^{2}$$") , sigma2_1 = gt::md("$${\\tau}_\\text{Team}^{2}$$"), sigma2_2 = gt::md("$${\\tau}_\\text{effectID}^{2}$$"), nobs = gt::md("$${N}_\\text{Obs}$$"), estimate_type = gt::md("Prediction Scenario")) %>% gt::text_transform(fn = function(x) map(x, gt::md), locations = gt::cells_row_groups()) %>% - gt::as_raw_html() + gt_fmt_yi(columns = "estimate_type") ``` @@ -3336,11 +3233,11 @@ min_outlier_euc <- complete_euc_data %>% pull(term) sample_size_euc_Zr <- ManyEcoEvo_results %>% - filter(exclusion_set == "complete", dataset == "eucalyptus") %>% - pluck("data", 1) %>% - select(id_col, sample_size) %>% - rename(term = id_col) %>% - mutate(sample_size = as.numeric(sample_size)) + filter(exclusion_set == "complete", dataset == "eucalyptus") %>% + pluck("data", 1) %>% + select(id_col, sample_size) %>% + rename(term = id_col) %>% + mutate(sample_size = as.numeric(sample_size)) mean_n_euc_Zr <- sample_size_euc_Zr %>% drop_na(sample_size) %>% @@ -3350,15 +3247,13 @@ mean_n_euc_Zr <- sample_size_euc_Zr %>% N_outliers_Zr_euc <- sample_size_euc_Zr %>% filter(term %in% min_outlier_euc) %>% - arrange(desc(sample_size)) - - + arrange(desc(sample_size)) ``` The outlier *Eucalyptus* $Z_r$ values were striking and merited special examination. 
The three negative outliers had very low sample sizes were based on either small subsets of the dataset or, in one case, extreme aggregation of data. -The outliers associated with small subsets had sample sizes ($n=$ `r N_outliers_Zr_euc$sample_size %>% paste0(collapse = ", ")`) that were less than half of the total possible sample size of `r nrow(ManyEcoEvo::euc_data)`. -The case of extreme aggregation involved averaging all values within each of the `r n_distinct(ManyEcoEvo::euc_data, "Property")` sites in the dataset. +The outliers associated with small subsets had sample sizes ($n=$ `r N_outliers_Zr_euc$sample_size %>% paste0(collapse = ", ")`) that were less than half of the total possible sample size of `r nrow(euc_data)`. +The case of extreme aggregation involved averaging all values within each of the `r n_distinct(euc_data, "Property")` sites in the dataset. ```{r} #| label: bt-Zr-outliers @@ -3366,6 +3261,8 @@ The case of extreme aggregation involved averaging all values within each of the #| warning: false #| message: false +rm(euc_data) + max_outlier_bt <- bt_complete_data %>% filter(type == "study") %>% slice_max(estimate) %>% @@ -3430,16 +3327,17 @@ Thus, effects at the extremes of the distribution were much stronger contributor #| tbl-cap: 'Estimated mean value of the standardised correlation coefficient, $Z_r$, along with its standard error and 95$\\%$confidence intervals. 
We re-computed the meta-analysis for different post-hoc subsets of the data: All eligible effects, removal of effects from blue tit analyses that contained a pair of highly collinear predictor variables, removal of effects from analysis teams that received at least one peer rating of "deeply flawed and unpublishable", removal of any effects from analysis teams that received at least one peer rating of either "deeply flawed and unpublishable" or "publishable with major revisions", inclusion of only effects from analysis teams that included at least one member who rated themselves as "highly proficient" or "expert" at conducting statistical analyses in their research area.' #| echo: false effectsparams <- ManyEcoEvo_viz %>% - dplyr::filter(estimate_type == "Zr", - model_name == "MA_mod", - exclusion_set == "complete") %>% + dplyr::filter(estimate_type == "Zr", + model_name == "MA_mod", + exclusion_set == "complete") %>% bind_rows(ManyEcoEvo_viz %>% dplyr::filter(exclusion_set == "complete-rm_outliers", estimate_type == "Zr", model_name == "MA_mod", publishable_subset == "All")) %>% hoist(tidy_mod_summary) %>% - unnest(tidy_mod_summary) + unnest(tidy_mod_summary) %>% + filter(type == "summary") effectsparams %>% select(dataset, @@ -3453,7 +3351,7 @@ effectsparams %>% p.value, starts_with("conf")) %>% mutate(publishable_subset = - case_when(publishable_subset == "All" & expertise_subset == "All" ~ + case_when(publishable_subset == "All" & expertise_subset == "All" & collinearity_subset == "All" ~ "All analyses", publishable_subset == "data_flawed" & expertise_subset == "All" ~ "Analyses receiving at least one 'Unpublishable' rating removed", @@ -3466,9 +3364,8 @@ effectsparams %>% case_when(exclusion_set == "complete" ~ "", TRUE ~ ", outliers removed"), dataset = case_when(str_detect(dataset, "euc") ~ "Eucalyptus", - TRUE ~ dataset), - collinearity_subset = dplyr::na_if(collinearity_subset, "All")) %>% - select(-expertise_subset) %>% + TRUE ~ dataset)) %>% + 
select(-expertise_subset, -collinearity_subset) %>% unite("data_subset", publishable_subset, exclusion_set, sep = "", remove = TRUE) %>% group_by(data_subset) %>% arrange(data_subset, dataset) %>% @@ -3477,23 +3374,23 @@ effectsparams %>% gt::fmt(columns = "p.value", fns = function(x) gtsummary::style_pvalue(x)) %>% gt::fmt_number(columns = c(-p.value, -dataset)) %>% - gt::sub_missing(columns = "collinearity_subset", rows = everything(), "") %>% - gt::fmt("collinearity_subset", fns = function(x) str_replace(x, "_", " ") %>% - str_c(", ", .)) %>% gt::cols_label(dataset = "Dataset", - estimate = "$$\\hat\\mu$$", + estimate = gt::md("$$\\hat\\mu$$"), std.error = gt::md("$$\\text{SE}[\\hat\\mu]$$"), conf.low = gt::md("95\\%CI"), - p.value = "p-value") %>% - gt::cols_merge(columns = c(dataset, collinearity_subset), - hide_columns =c(collinearity_subset)) %>% + p.value = gt::md("*p*")) %>% gt::cols_merge(columns = starts_with("conf"), pattern = "[{1},{2}]") %>% - gt::cols_move(columns = conf.low, after = std.error) %>% + gt::cols_move(columns = conf.low, after = std.error) %>% + gt::row_group_order(groups = c("All analyses", + "Blue tit analyses containing highly collinear predictors removed", + "All analyses, outliers removed", + "Analyses receiving at least one 'Unpublishable' rating removed", + "Analyses receiving at least one 'Unpublishable' and or 'Major Revisions' rating removed", + "Analyses from teams with highly proficient or expert data analysts")) %>% gt::tab_style(locations = cells_body(rows = str_detect(dataset, "Eucalyptus"), columns = dataset), - style = cell_text(style = "italic")) %>% - gt::as_raw_html() + style = cell_text(style = "italic")) ``` ### Out-of-sample predictions ($y_{i}$) @@ -3505,7 +3402,7 @@ We did not conduct these *post hoc* analyses on the out-of-sample predictions as ### Effect Sizes ($Z_r$) Removing poorly rated analyses had limited impact on the meta-analytic means (@fig-all-forest-plots-Zr). 
-For the *Eucalyptus* dataset, the meta-analytic mean shifted from `r filter(effectsparams, dataset == "eucalyptus", publishable_subset == "All") %>% round_pluck("estimate")` to `r filter(effectsparams, dataset == "eucalyptus", publishable_subset == "data_flawed") %>% round_pluck("estimate")` when effects from analyses rated as unpublishable were removed, and to `r filter(effectsparams, dataset == "eucalyptus", publishable_subset == "data_flawed_major") %>% round_pluck("estimate")` when effects from analyses rated, at least once, as unpublishable or requiring major revisions were removed. +For the *Eucalyptus* dataset, the meta-analytic mean shifted from `r filter(effectsparams, !!!filter_vars_main_analysis, dataset == "eucalyptus") %>% round_pluck("estimate")` to `r filter(effectsparams, dataset == "eucalyptus", publishable_subset == "data_flawed") %>% round_pluck("estimate")` when effects from analyses rated as unpublishable were removed, and to `r filter(effectsparams, dataset == "eucalyptus", publishable_subset == "data_flawed_major") %>% round_pluck("estimate")` when effects from analyses rated, at least once, as unpublishable or requiring major revisions were removed. Further, the confidence intervals for all of these means overlapped each of the other means (@tbl-effects-params). We saw similar patterns for the blue tit dataset, with only small shifts in the meta-analytic mean, and confidence intervals of all three means overlapping each other mean (@tbl-effects-params). Refitting the meta-analysis with a fixed effect for categorical ratings also showed no indication of differences in group meta-analytic means due to peer ratings (@fig-euc-cat-ratings-MA). 
@@ -3549,50 +3446,33 @@ Inclusion of collinear predictors does not harm model prediction, and so we did ## Explaining Variation in Deviation Scores None of the pre-registered predictors explained substantial variation in deviation among submitted statistical effects from the meta-analytic mean (@tbl-model-summary-stats-ratings-cont, @tbl-deviation-rating-estimates). -Note that the extremely high ${R}_\text{Conditional}^{2}$ values from the analyses of continuous peer ratings as predictors of deviation scores are a function of the random effects, not the fixed effect of interest. -These high values of the ${R}_\text{Conditional}^{2}$ result from the fact that each effect size was included in the analysis multiple times, to allow comparison with ratings from the multiple peer reviewers who reviewed each analysis, and therefore when we included effect ID as a random effect, the observations within each random effect category were identical. ```{r} #| label: tbl-model-summary-stats-ratings-cont +#| tbl-cap: "Summary metrics for registered models seeking to explain deviation (Box-Cox transformed absolute deviation scores) from the mean $Z_r$ as a function of Sorensen's Index, categorical peer ratings, and continuous peer ratings for blue tit and *Eucalyptus* analyses, and as a function of the presence or absence of random effects (in the analyst's models) for *Eucalyptus* analyses. We report coefficient of determination, $R^2$, for our models including only fixed effects as predictors of deviation, and we report $R^{2}_\\text{Conditional}$, $R^{2}_\\text{Marginal}$ and the intra-class correlation (ICC) from our models that included both fixed and random effects. For all our models, we calculated the residual standard deviation $\\sigma$ and root mean squared error (RMSE)." 
#| echo: false #| message: false -#| column: page-right modelfitstats <- ManyEcoEvo_viz %>% filter(!!!filter_vars_main_analysis, model_name %in% c("box_cox_rating_cont", "box_cox_rating_cat")) %>% - mutate(perf = map(model, performance::performance), - mod_sum_stats = map(model, broom.mixed::glance)) %>% - hoist(perf, "R2_conditional", "R2_marginal", "ICC", "Sigma", "RMSE") %>% - hoist(mod_sum_stats, "nobs") %>% - select(-model, - -mod_summary, - -tidy_mod_summary, - -MA_fit_stats, - -mod_fit_stats, - -ends_with("plot"), - -perf, - -mod_sum_stats, - -estimate_type, - -publishable_subset, - -exclusion_set, - -expertise_subset, - -collinearity_subset) %>% + select(dataset, model_name, mod_fit_stats, mod_glance) %>% + hoist(mod_fit_stats, "R2_conditional", "R2_marginal", "ICC", "Sigma", "RMSE") %>% + hoist(mod_glance, "nobs") %>% + select(-mod_fit_stats, -mod_glance) %>% bind_rows({ ManyEcoEvo_viz %>% filter(!!!filter_vars_main_analysis, model_name %in% c("sorensen_glm", "uni_mixed_effects")) %>% - mutate(model = map(model, extract_fit_engine), - perf = map(model, performance::performance), - mod_sum_stats = map(model, broom.mixed::glance)) %>% - select(dataset, model_name, perf, mod_sum_stats) %>% - hoist(mod_sum_stats, "nobs") %>% - hoist(perf, "R2", "RMSE", "Sigma") %>% - select(-perf, -mod_sum_stats) + select(dataset, model_name, mod_fit_stats, mod_glance) %>% + hoist(mod_glance, "nobs") %>% + hoist(mod_fit_stats, "R2", "RMSE", "Sigma") %>% + select(-mod_fit_stats, -mod_glance) %>% + drop_na() }) %>% group_by(model_name) %>% - relocate("R2", .before = starts_with("R2_")) - + relocate("R2", .before = starts_with("R2_")) %>% + relocate("nobs", .after = dataset) modelfitstats %>% mutate(model_name = forcats::as_factor(model_name), @@ -3610,21 +3490,39 @@ modelfitstats %>% TRUE ~ dataset)) %>% arrange(model_name, dataset) %>% gt::gt() %>% - gt::tab_caption(caption = gt::md("Summary metrics for registered models seeking to explain deviation (Box-Cox transformed 
absolute deviation scores) from the mean $Z_r$ as a function of Sorensen's Index, categorical peer ratings, and continuous peer ratings for blue tit and *Eucalyptus* analyses, and as a function of the presence or absence of random effects (in the analyst's models) for *Eucalyptus* analyses. We report coefficient of determination, $R^2$, for our models including only fixed effects as predictors of deviation, and we report $R^{2}_\\text{Conditional}$, $R^{2}_\\text{Marginal}$ and the intra-class correlation (ICC) from our models that included both fixed and random effects. For all our models, we calculated the residual standard deviation $\\sigma$ and root mean squared error (RMSE).")) %>% gt::opt_stylize(style = 6, color = "gray") %>% gt::sub_missing(missing_text = "") %>% - gt::fmt(columns = function(x) rlang::is_bare_numeric(x), - fns = function(x) format(x, digits = 3)) %>% gt::cols_label(dataset = "Dataset", - R2 = "$$R^2$$", - R2_conditional = "$${R}_\\text{Conditional}^{2}$$", - R2_marginal = "$${R}_\\text{Marginal}^{2}$$", - Sigma = "$$\\sigma$$", - nobs = "$$N_\\text{Obs.}$$") %>% + R2 = gt::md("$$R^2$$"), + R2_conditional = gt::md("$${R}_\\text{Conditional}^{2}$$"), + R2_marginal = gt::md("$${R}_\\text{Marginal}^{2}$$"), + Sigma = gt::md("$$\\sigma$$"), + nobs = gt::md("$$N_\\text{Obs.}$$")) %>% gt::tab_style(locations = cells_body(rows = str_detect(dataset, "Eucalyptus"), columns = dataset), style = cell_text(style = "italic")) %>% - gt::as_raw_html() + gt::fmt_number(columns = c(-dataset, -model_name), + decimals = 2, + drop_trailing_zeros = TRUE, + drop_trailing_dec_mark = TRUE) %>% + gt::fmt_scientific(columns = c(Sigma, RMSE), + rows = abs(Sigma) < 0.01 | abs(RMSE) < 0.01, + decimals = 2) %>% + gt::fmt_scientific(columns = c("R2_marginal"), + rows = abs(R2_marginal) < 0.01 | abs(R2_marginal) >1000, + decimals = 2) %>% + gt::fmt_scientific(columns = c("R2_conditional"), + rows = abs(R2_conditional) < 0.01 | abs(R2_conditional) > 1000, + decimals = 2) 
%>% + gt::fmt_scientific(columns = c("R2"), + rows = abs(R2) < 0.01 | abs(R2) > 1000, + decimals = 2) %>% + gt::fmt_scientific(columns = c("ICC"), + rows = abs(ICC) < 0.01 | abs(ICC) > 1000, + decimals = 2) %>% + gt::fmt_scientific(columns = c("RMSE"), + rows = abs(RMSE) < 0.01 | abs(RMSE) > 1000, + decimals = 2) ``` ```{r} @@ -3640,21 +3538,21 @@ ManyEcoEvo_viz %>% "sorensen_glm", "uni_mixed_effects")) %>% dplyr::filter(dataset != "blue tit" | str_detect(model_name, "mixed", negate = TRUE)) %>% - mutate(tbl_output = map(model, parameters::parameters), - model_name = forcats::as_factor(model_name) %>% - forcats::fct_relevel(c("box_cox_rating_cat", - "box_cox_rating_cont", - "sorensen_glm", - "uni_mixed_effects")) %>% - forcats::fct_recode(`Deviation explained by categorical ratings` = "box_cox_rating_cat", - `Deviation explained by continuous ratings` = "box_cox_rating_cont", - `Deviation explained by Sorensen's index` = "sorensen_glm", - `Deviation explained by inclusion of random effects` = "uni_mixed_effects") + mutate( + model_name = forcats::as_factor(model_name) %>% + forcats::fct_relevel(c("box_cox_rating_cat", + "box_cox_rating_cont", + "sorensen_glm", + "uni_mixed_effects")) %>% + forcats::fct_recode(`Deviation explained by categorical ratings` = "box_cox_rating_cat", + `Deviation explained by continuous ratings` = "box_cox_rating_cont", + `Deviation explained by Sorensen's index` = "sorensen_glm", + `Deviation explained by inclusion of random effects` = "uni_mixed_effects") ) %>% select(dataset, model_name, - tbl_output) %>% - unnest(tbl_output) %>% + model_params) %>% + unnest(model_params) %>% mutate(dataset = case_when(str_detect(dataset, "eucalyptus") ~ "*Eucalyptus*", TRUE ~ dataset), Group = case_when(Group == "study_id" ~ "Effect ID", @@ -3675,9 +3573,9 @@ ManyEcoEvo_viz %>% gt::fmt(columns = "p", fns = function(x) gtsummary::style_pvalue(x)) %>% gt::cols_label(CI_low = gt::md("95\\%CI")) %>% - gt::cols_label(df_error = "df") %>% + 
gt::cols_label(df_error = "df", p = gt::md("*p*")) %>% gt::cols_merge(columns = starts_with("CI_"), - pattern = "[{1},{2}]") %>% + pattern = "[{1}, {2}]") %>% gt::cols_move(columns = CI_low, after = SE) %>% gt::opt_stylize(style = 6, color = "gray") %>% gt::fmt(columns = c(Coefficient, SE, starts_with("CI_"), t) , @@ -3693,11 +3591,21 @@ ManyEcoEvo_viz %>% missing_text = "") %>% gt::text_transform(fn = function(x) map(x, gt::md), locations = gt::cells_row_groups()) %>% + gt::cols_hide(Effects) %>% + gt::cols_label(Group = "Random Effect") %>% gt::as_raw_html() -p_cat_bt_tidy<-broom.mixed::tidy(ManyEcoEvo::ManyEcoEvo_results$sorensen_glm[[1]],conf.int=T,conf.level=0.95) +p_cat_bt_tidy <- ManyEcoEvo_viz %>% + filter(!!!filter_vars_main_analysis, + model_name == "sorensen_glm", + dataset == "blue tit") %>% + pluck("tidy_mod_summary", 1) -p_cat_euc_tidy<-broom.mixed::tidy(ManyEcoEvo::ManyEcoEvo_results$sorensen_glm[[2]],conf.int=T,conf.level=0.95) +p_cat_euc_tidy <- ManyEcoEvo_viz %>% + filter(!!!filter_vars_main_analysis, + model_name == "sorensen_glm", + dataset == "eucalyptus") %>% + pluck("tidy_mod_summary", 1) ``` ## Deviation Scores as explained by Reviewer Ratings @@ -3713,14 +3621,14 @@ review_data <- ManyEcoEvo_results %>% select(ends_with("_id"), id_col, dataset, review_data) %>% unnest(review_data) %>% distinct(ReviewerId , id_col,.keep_all = T) %>% -#by unfortunate chance two reviewer reviewed the same analyses twice Rev100 for R_2TFbZCxFIz91BGC and Rev133 for R_1hSZf5af3tKfwLI. Fortunately, their ratings were similar so we will just use distinct to remove one above -#reviewers reviewed all analyses per team for each dataset - need to deduplicate to avoid inflating inter-rater reliability + #by unfortunate chance two reviewers reviewed the same analyses twice Rev100 for R_2TFbZCxFIz91BGC and Rev133 for R_1hSZf5af3tKfwLI. 
Fortunately, their ratings were similar so we will just use distinct to remove one above + #reviewers reviewed all analyses per team for each dataset - need to deduplicate to avoid inflating inter-rater reliability distinct(response_id, ReviewerId , dataset,.keep_all = T) %>% mutate(PublishableAsIs = case_when(PublishableAsIs == "publishable as is" ~ 4, - PublishableAsIs == "publishable with minor revision" ~ 3, - PublishableAsIs == "publishable with major revision" ~ 2, - PublishableAsIs == "deeply flawed and unpublishable" ~ 1, - TRUE ~ NA)) + PublishableAsIs == "publishable with minor revision" ~ 3, + PublishableAsIs == "publishable with major revision" ~ 2, + PublishableAsIs == "deeply flawed and unpublishable" ~ 1, + TRUE ~ NA)) alpha_result_continuous <- review_data %>% select(id_col, ReviewerId, RateAnalysis) %>% @@ -3739,12 +3647,12 @@ alpha_result_ordinal <- review_data %>% ``` We obtained reviews from 153 reviewers who reviewed analyses for a mean of `r mean_reviews_by_reviewer` (range `r min_reviews_by_reviewer` - `r max_reviews_by_reviewer`) analysis teams. -Analyses of the blue tit dataset received a total of `r filter(ArticleReviewSummary, dataset == "blue tit") %>% pluck("sum")` reviews, each was reviewed by a mean of `r filter(ArticleReviewSummary, dataset == "blue tit") %>% round_pluck("mean")` (SD `r filter(ArticleReviewSummary, dataset == "blue tit") %>% round_pluck("sd")`, range `r filter(ArticleReviewSummary, dataset == "blue tit") %>% pluck("min")`-`r filter(ArticleReviewSummary, dataset == "blue tit") %>% pluck("max")`) reviewers. 
-Analyses of the *Eucalyptus* dataset received a total of `r filter(ArticleReviewSummary, dataset == "eucalyptus") %>% pluck("sum")` reviews, each was reviewed by a mean of `r filter(ArticleReviewSummary, dataset == "eucalyptus") %>% pluck("mean")` (SD `r filter(ArticleReviewSummary, dataset == "eucalyptus") %>% round_pluck("sd")`, range `r filter(ArticleReviewSummary, dataset == "eucalyptus") %>% pluck("min")`-`r filter(ArticleReviewSummary, dataset == "eucalyptus") %>% pluck("max")`) reviewers. +Analyses of the blue tit dataset received a total of `r filter(ArticleReviewSummary, dataset == "blue tit") %>% round_pluck("sum")` reviews, each was reviewed by a mean of `r filter(ArticleReviewSummary, dataset == "blue tit") %>% round_pluck("mean")` ($\text{SD}$ `r filter(ArticleReviewSummary, dataset == "blue tit") %>% round_pluck("sd")`, range `r filter(ArticleReviewSummary, dataset == "blue tit") %>% round_pluck("min")`-`r filter(ArticleReviewSummary, dataset == "blue tit") %>% round_pluck("max")`) reviewers. +Analyses of the *Eucalyptus* dataset received a total of `r filter(ArticleReviewSummary, dataset == "eucalyptus") %>% round_pluck("sum")` reviews, each was reviewed by a mean of `r filter(ArticleReviewSummary, dataset == "eucalyptus") %>% round_pluck("mean")` ($\text{SD}$ `r filter(ArticleReviewSummary, dataset == "eucalyptus") %>% round_pluck("sd")`, range `r filter(ArticleReviewSummary, dataset == "eucalyptus") %>% round_pluck("min")`-`r filter(ArticleReviewSummary, dataset == "eucalyptus") %>% round_pluck("max")`) reviewers. We tested for inter-rater-reliability to examine how similarly reviewers reviewed each analysis and found approximately no agreement among reviewers. -When considering continuous ratings, IRR was `r round(alpha_result_continuous$value,2)`, and for categorical ratings, IRR was `r round(alpha_result_ordinal$value,2)`. 
+When considering continuous ratings, IRR was `r round(alpha_result_continuous$value,2)`, and for categorical ratings, IRR was `r round(alpha_result_ordinal$value, 2)`. -Many of the models of deviance as a function of peer ratings faced issues of failure to converge or singularity due to sparse design matrices with our pre-registered random effects (`Effect_Id` and `Reviewer_ID`) ([see supplementary material -@tbl-explore-Zr-deviation-random-effects-structure]). +Many of the models of deviation as a function of peer ratings faced issues of failure to converge or singularity due to sparse design matrices with our pre-registered random effects (`Effect_Id` and `Reviewer_ID`) ([see supplementary material -@tbl-explore-Zr-deviation-random-effects-structure]). These issues persisted after increasing the tolerance and changing the optimizer. For both *Eucalyptus* and blue tit datasets, models with continuous ratings as a predictor were singular when both pre-registered random effects were included. @@ -3760,6 +3668,7 @@ We re-ran the multi-level meta-analysis with a fixed-effect for the categorical #| label: fig-cat-peer-rating #| echo: false #| message: false +#| warning: false #| fig-cap: "Violin plot of Box-Cox transformed deviation from meta-analytic mean $Z_r$ as a function of categorical peer rating. Grey points for each rating group denote model-estimated marginal mean deviation, and error bars denote 95$\\%$CI of the estimate. **A** Blue tit dataset, **B** *Eucalyptus* dataset." 
#| fig-height: 8 #| fig-width: 10 @@ -3786,11 +3695,11 @@ plot_model_means_box_cox_cat <- function(dat, as_tibble() %>% mutate(lambda = dat$lambda %>% unique()) %>% mutate(across(.cols = -PublishableAsIs, - ~ sae::bxcx(unique(dat$lambda),x = .x, InverseQ = TRUE))) + ~ sae::bxcx(unique(dat$lambda),x = .x, InverseQ = TRUE))) } p <- ggplot(dat, aes(x = {{variable}}, - y = box_cox_abs_deviation_score_estimate)) + + y = box_cox_abs_deviation_score_estimate)) + # Add base dat geom_violin(aes(fill = {{variable}}), trim = TRUE, @@ -3817,9 +3726,9 @@ plot_model_means_box_cox_cat <- function(dat, EnvStats::stat_n_text() + see::theme_modern() + theme(axis.text.x = element_text(angle = 90)) #+ - # ggtitle(label = title) + # ggtitle(label = title) - if(back_transform == TRUE){ + if (back_transform == TRUE) { p <- p + labs(x = "Categorical Peer Review Rating", y = "Absolute Deviation from\n Meta-Anaytic Mean Zr") @@ -3851,31 +3760,31 @@ ManyEcoEvo_results %>% plot_name = paste(exclusion_set, dataset, sep = ", ")) %>% mutate(model_data = map(model_data, .f = ~ mutate(.x, PublishableAsIs = - str_replace(PublishableAsIs, - "publishable with ", "") %>% - str_replace("deeply flawed and ", "") %>% - capwords())), + str_replace(PublishableAsIs, + "publishable with ", "") %>% + str_replace("deeply flawed and ", "") %>% + capwords())), predictor_means = map(predictor_means, - .f = ~ mutate(.x, PublishableAsIs = - str_replace(PublishableAsIs, - "publishable with ", "") %>% - str_replace("deeply flawed and ", "") %>% - capwords()))) %>% + .f = ~ mutate(.x, PublishableAsIs = + str_replace(PublishableAsIs, + "publishable with ", "") %>% + str_replace("deeply flawed and ", "") %>% + capwords()))) %>% mutate(plot_name = str_remove(plot_name, "complete, ") %>% str_replace_all(., " ", "_") %>% paste0("_violin_plot")) %>% pwalk(.l = list(.$model_data, .$predictor_means, .$plot_name), - .f = ~ plot_model_means_box_cox_cat(..1, - PublishableAsIs, - ..2, - new_order = - c("Unpublishable", - "Major 
Revision", - "Minor Revision", - "Publishable As Is"), - ..3) %>% - assign(x = ..3, value = ., envir = .GlobalEnv)) + .f = ~ plot_model_means_box_cox_cat(..1, + PublishableAsIs, + ..2, + new_order = + c("Unpublishable", + "Major Revision", + "Minor Revision", + "Publishable As Is"), + ..3) %>% + assign(x = ..3, value = ., envir = .GlobalEnv)) library(patchwork) blue_tit_violin_plot + @@ -3894,7 +3803,7 @@ Some models of the influence of reviewer ratings on out-of-sample predictions ($ ### Effect Sizes ($Z_r$) We employed Sorensen's index to calculate the distinctiveness of the set of predictor variables used in each model (@fig-sorensen-plots). -The mean Sorensen's score for blue tit analyses was `r filter(SorensenSummary, subset_name == "effects", dataset == "blue tit") %>% round_pluck("mean")` (SD: `r filter(SorensenSummary, subset_name == "effects", dataset == "blue tit") %>% round_pluck("sd")`, range `r filter(SorensenSummary, subset_name == "effects", dataset == "blue tit") %>% round_pluck("min")`-`r filter(SorensenSummary, subset_name == "effects", dataset == "blue tit") %>% round_pluck("max")`), and for *Eucalyptus* analyses was `r filter(SorensenSummary, subset_name == "effects", dataset == "eucalyptus") %>% round_pluck("mean")` (SD: `r filter(SorensenSummary, subset_name == "effects", dataset == "eucalyptus") %>% round_pluck("sd")`, range `r filter(SorensenSummary, subset_name == "effects", dataset == "eucalyptus") %>% round_pluck("min")`-`r filter(SorensenSummary, subset_name == "effects", dataset == "eucalyptus") %>% round_pluck("max")`). 
+The mean Sorensen's score for blue tit analyses was `r filter(SorensenSummary, subset_name == "effects", dataset == "blue tit") %>% round_pluck("mean")` ($\text{SD}$: `r filter(SorensenSummary, subset_name == "effects", dataset == "blue tit") %>% round_pluck("sd")`, range `r filter(SorensenSummary, subset_name == "effects", dataset == "blue tit") %>% round_pluck("min")`-`r filter(SorensenSummary, subset_name == "effects", dataset == "blue tit") %>% round_pluck("max")`), and for *Eucalyptus* analyses was `r filter(SorensenSummary, subset_name == "effects", dataset == "eucalyptus") %>% round_pluck("mean")` ($\text{SD}$: `r filter(SorensenSummary, subset_name == "effects", dataset == "eucalyptus") %>% round_pluck("sd")`, range `r filter(SorensenSummary, subset_name == "effects", dataset == "eucalyptus") %>% round_pluck("min")`-`r filter(SorensenSummary, subset_name == "effects", dataset == "eucalyptus") %>% round_pluck("max")`). We found no meaningful relationship between distinctiveness of variables selected and deviation from the meta-analytic mean (@tbl-deviation-rating-estimates, @fig-sorensen-plots) for either blue tit (mean `r filter(p_cat_bt_tidy, term == "mean_diversity_index") %>% round_pluck("estimate")`, 95$\%$CI `r filter(p_cat_bt_tidy, term == "mean_diversity_index") %>% round_pluck("conf.low")`,`r filter(p_cat_bt_tidy, term == "mean_diversity_index") %>% round_pluck("conf.high")`) or *Eucalyptus* effects (mean `r filter(p_cat_euc_tidy, term == "mean_diversity_index") %>% round_pluck("estimate")`, 95$\%$CI `r filter(p_cat_euc_tidy, term == "mean_diversity_index") %>% round_pluck("conf.low")`,`r filter(p_cat_euc_tidy, term == "mean_diversity_index") %>% round_pluck("conf.high")`). 
@@ -3970,37 +3879,37 @@ There was no relationship between random-effect inclusion and deviation from met #TODO move function to R/ and document roxygen plot_model_means_RE <- function(data, variable, predictor_means) { p <- ggplot(data, aes(x = as.factor({{variable}}), - y = box_cox_abs_deviation_score_estimate)) + - # Add base data - geom_violin(aes(fill = as.factor({{variable}})), color = "white") + - see::geom_jitter2(width = 0.05, alpha = 0.5) + - # Add pointrange and line from means - geom_line(data = predictor_means, aes(y = Mean, group = 1), size = 1) + - geom_pointrange( - data = predictor_means, - aes(y = Mean, ymin = CI_low, ymax = CI_high), - size = 1, - color = "white" - ) + - # Improve colors - scale_x_discrete(labels = c("0" = "No Random Effects", "1" = "Random Effects")) + - see::scale_fill_material(palette = "ice", - discrete = TRUE, - labels = c("No Random Effects", "Random effects"), - name = "") + - see::theme_modern() + + y = box_cox_abs_deviation_score_estimate)) + + # Add base data + geom_violin(aes(fill = as.factor({{variable}})), color = "white") + + see::geom_jitter2(width = 0.05, alpha = 0.5) + + # Add pointrange and line from means + geom_line(data = predictor_means, aes(y = Mean, group = 1), size = 1) + + geom_pointrange( + data = predictor_means, + aes(y = Mean, ymin = CI_low, ymax = CI_high), + size = 1, + color = "white" + ) + + # Improve colors + scale_x_discrete(labels = c("0" = "No Random Effects", "1" = "Random Effects")) + + see::scale_fill_material(palette = "ice", + discrete = TRUE, + labels = c("No Random Effects", "Random effects"), + name = "") + + see::theme_modern() + EnvStats::stat_n_text() + - labs(x = "Random Effects Included", - y = "Box Cox Transformed \n Absolute Deviation Score") + labs(x = "Random Effects Included", + y = "Box Cox Transformed \n Absolute Deviation Score") return(p) } ManyEcoEvo_viz %>% filter(model_name == "uni_mixed_effects", dataset == "eucalyptus", - !!!filter_vars_main_analysis) %>% - 
left_join(ManyEcoEvo_results %>% - select(effects_analysis)) %>% + !!!filter_vars_main_analysis) %>% + left_join(ManyEcoEvo_results, + by = join_by(exclusion_set, dataset, publishable_subset, expertise_subset, collinearity_subset, estimate_type)) %>% ungroup() %>% select(exclusion_set, dataset, estimate_type, effects_analysis, model) %>% mutate(predictor_means = map(model, .f = ~ pluck(.x, "fit") %>% @@ -4030,7 +3939,7 @@ When a large pool of ecologists and evolutionary biologists analyzed the same tw Although the variability in analytical outcomes was high for both datasets, the patterns of this variability differed distinctly between them. For the blue tit dataset, there was nearly continuous variability across a wide range of $Z_r$ values. In contrast, for the *Eucalyptus* dataset, there was less variability across most of the range, but more striking outliers at the tails. -Among out-of-sample predictions, there was again almost continuous variation across a wide range (2 SD) among blue tit estimates. +Among out-of-sample predictions, there was again almost continuous variation across a wide range (2 $\text{SD}$) among blue tit estimates. For *Eucalyptus*, out-of-sample predictions were also notably variable, with about half the predicted stem count values at \<2 but the other half being much larger, and ranging to nearly 40 stems per 15 m x 15 m plot. We investigated several hypotheses for drivers of this variability within datasets, but found little support for any of these. Most notably, even when we excluded analyses that had received one or more poor peer reviews, the heterogeneity in results largely persisted. @@ -4225,32 +4134,32 @@ All authors read and approved the final manuscript. ```{r} #| label: tbl-grateful-pkg-list #| echo: false -#| tbl-cap: "R packages used to generate this manuscript. Please see the ManyEcoEvo package for a full list of packages used in the analysis pipeline." +#| tbl-cap: "R packages used to generate this manuscript. 
Please see the `ManyEcoEvo::` package for a full list of packages used in the analysis pipeline." # pkgs <- cite_packages(output = "table", out.dir = here::here("ms/")) # knitr::kable(pkgs) #NOTE manually edited grateful-refs.bib and updated pkgs below due to bug data.frame( stringsAsFactors = FALSE, - Package = c("base","betapart", - "broom.mixed","colorspace","cowplot","devtools","EnvStats", - "GGally","ggforestplot","ggh4x","ggpubr","ggrepel", - "ggthemes","glmmTMB","gt","gtsummary","here","Hmisc", - "irr","janitor","knitr","latex2exp","lme4","ManyEcoEvo", - "metafor","modelbased","multilevelmod","MuMIn", - "naniar","NatParksPalettes","orchaRd","parameters", - "patchwork","performance","renv","rmarkdown","sae", - "scales","see","showtext","specr","targets","tidymodels", - "tidytext","tidyverse","withr","xfun"), - Version = c("4.4.0","1.6","0.2.9.5", - "2.1.0","1.1.3","2.4.5","2.8.1","2.2.1","0.1.0","0.2.8", - "0.6.0","0.9.5","5.1.0","1.1.8","0.10.1","1.7.2", - "1.0.1","5.1.2","0.84.1","2.2.0","1.46","0.9.6", - "1.1.35.3","1.1.0","4.6.0","0.8.7","1.0.0","1.47.5", - "1.1.0","0.2.0","2.0","0.21.7","1.2.0","0.11.0", - "1.0.2","2.27","1.3","1.3.0","0.8.4","0.9.7","1.0.0", - "1.7.0","1.1.1","0.4.2","2.0.0","3.0.0","0.44"), - Citation = c("@base", "@betapart", "@broommixed", "@colorspace2020a", "@cowplot", "@devtools", "@EnvStats-book", "@GGally", "@ggforestplot", "@ggh4x", "@ggpubr", "@ggrepel", "@ggthemes", "@glmmTMB", "@gt", "@gtsummary", "@here", "@Hmisc", "@irr", "@janitor", "@knitr2024", "@latex2exp", "@lme4", "@ManyEcoEvo", "@metafor", "@modelbased", "@multilevelmod", "@MuMIn", "@naniar", "@NatParksPalettes", "@orchaRd", "@parameters", "@patchwork", "@performance", "@renv", "@rmarkdown2024", "@molina-marhuenda:2015", "@scales", "@see", "@showtext", "@specr", "@targets", "@tidymodels", "@tidytext", "@tidyverse", "@withr", "@xfun") + Package = c("base","betapart", + "broom.mixed","colorspace","cowplot","devtools","EnvStats", + 
"GGally","ggforestplot","ggh4x","ggpubr","ggrepel", + "ggthemes","glmmTMB","gt","gtsummary","here","Hmisc", + "irr","janitor","knitr","latex2exp","lme4","ManyEcoEvo", + "metafor","modelbased","multilevelmod","MuMIn", + "naniar","NatParksPalettes","orchaRd","parameters", + "patchwork","performance","renv","rmarkdown","sae", + "scales","see","showtext","specr","targets","tidymodels", + "tidytext","tidyverse","withr","xfun"), + Version = c("4.4.0","1.6","0.2.9.5", + "2.1.0","1.1.3","2.4.5","2.8.1","2.2.1","0.1.0","0.2.8", + "0.6.0","0.9.5","5.1.0","1.1.8","0.10.1","1.7.2", + "1.0.1","5.1.2","0.84.1","2.2.0","1.46","0.9.6", + "1.1.35.3","1.1.0","4.6.0","0.8.7","1.0.0","1.47.5", + "1.1.0","0.2.0","2.0","0.21.7","1.2.0","0.11.0", + "1.0.2","2.27","1.3","1.3.0","0.8.4","0.9.7","1.0.0", + "1.7.0","1.1.1","0.4.2","2.0.0","3.0.0","0.44"), + Citation = c("@base", "@betapart", "@broommixed", "@colorspace2020a", "@cowplot", "@devtools", "@EnvStats-book", "@GGally", "@ggforestplot", "@ggh4x", "@ggpubr", "@ggrepel", "@ggthemes", "@glmmTMB", "@gt", "@gtsummary", "@here", "@Hmisc", "@irr", "@janitor", "@knitr2024", "@latex2exp", "@lme4", "@ManyEcoEvo", "@metafor", "@modelbased", "@multilevelmod", "@MuMIn", "@naniar", "@NatParksPalettes", "@orchaRd", "@parameters", "@patchwork", "@performance", "@renv", "@rmarkdown2024", "@molina-marhuenda:2015", "@scales", "@see", "@showtext", "@specr", "@targets", "@tidymodels", "@tidytext", "@tidyverse", "@withr", "@xfun") ) %>% knitr::kable() ``` diff --git a/ms/references.bib b/ms/references.bib index 5b63d4a..2430e6a 100644 --- a/ms/references.bib +++ b/ms/references.bib @@ -757,11 +757,6 @@ @article{munoz2018 type = {Journal Article} } - - - - - @article{nakagawa2007, author = {Nakagawa, Shinichi and Cuthill, Innes C.}, title = {Effect size, confidence interval and statistical significance: a practical guide for biologists}, @@ -814,6 +809,16 @@ @article{nakagawa2022 type = {Journal Article} } +@article{nakagawa2023, + title={Quantitative 
evidence synthesis: a practical guide on meta-analysis, meta-regression, and publication bias tests for environmental sciences}, volume={12}, ISSN={2047-2382}, url={https://doi.org/10.1186/s13750-023-00301-6}, + DOI={10.1186/s13750-023-00301-6}, + number={1}, + journal={Environmental Evidence}, + author={Nakagawa, Shinichi and Yang, Yefeng and Macartney, Erin L. and Spake, Rebecca and Lagisz, Malgorzata}, + year={2023}, + month=apr, + pages={8} +} @article{nicolaus2009, author = {Nicolaus, M. and Michler, S. P. M. and Ubels, R. and van der Velde, M. and Komdeur, J. and Both, C. and Tinbergen, J. M.}, @@ -868,6 +873,21 @@ @article{noble2017 type = {Journal Article} } +@article{ohara2010, + title = {Do not log{-}transform count data}, + author = {{O{\textquoteright}Hara}, Robert B. and Kotze, D. Johan}, + year = {2010}, + month = {05}, + date = {2010-05-04}, + journal = {Methods in Ecology and Evolution}, + pages = {118--122}, + volume = {1}, + number = {2}, + doi = {10.1111/j.2041-210x.2010.00021.x}, + url = {http://dx.doi.org/10.1111/j.2041-210X.2010.00021.x}, + langid = {en} +} + @article{open2015, author = {{Open Science Collaboration}}, title = {Estimating the reproducibility of psychological science}, diff --git a/renv.lock b/renv.lock index a61d8bb..e02f501 100644 --- a/renv.lock +++ b/renv.lock @@ -146,18 +146,19 @@ }, "ManyEcoEvo": { "Package": "ManyEcoEvo", - "Version": "2.0.0", + "Version": "2.7.6.9004", "Source": "GitHub", + "Remotes": "daniel1noble/orchaRd, NightingaleHealth/ggforestplot", "RemoteType": "github", "RemoteHost": "api.github.com", - "RemoteUsername": "egouldo", "RemoteRepo": "ManyEcoEvo", - "RemoteRef": "main", - "RemoteSha": "f0dd6ebedbdca7e4da2e9ae6ea5982b6eb38a2a8", - "Remotes": "daniel1noble/orchaRd, NightingaleHealth/ggforestplot", + "RemoteUsername": "egouldo", + "RemoteRef": "b4851853", + "RemoteSha": "b485185349a46f7d29c0c6e7dcf0ea9ca2f3d976", "Requirements": [ "R", "betapart", + "bookdown", "cli", "data.table", "dplyr", @@ -165,6 
+166,7 @@ "fs", "glue", "here", + "lifecycle", "lme4", "magrittr", "metafor", @@ -172,11 +174,13 @@ "pointblank", "purrr", "rlang", + "rmarkdown", "tibble", "tidyr", - "tidyselect" + "tidyselect", + "withr" ], - "Hash": "55dddab07b5490a427740234a794a7b2" + "Hash": "3d044204cf705234f3e746e22151ade5" }, "Matrix": { "Package": "Matrix", @@ -257,14 +261,14 @@ }, "Rcpp": { "Package": "Rcpp", - "Version": "1.0.12", + "Version": "1.0.13", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "methods", "utils" ], - "Hash": "5ea2700d21e038ace58269ecdbeb9ec0" + "Hash": "f27411eb6d9c3dada5edd444b8416675" }, "RcppArmadillo": { "Package": "RcppArmadillo", @@ -282,16 +286,16 @@ }, "RcppEigen": { "Package": "RcppEigen", - "Version": "0.3.4.0.0", + "Version": "0.3.4.0.2", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "Rcpp", "stats", "utils" ], - "Hash": "df49e3306f232ec28f1604e36a202847" + "Hash": "4ac8e423216b8b70cb9653d1b3f71eb9" }, "RcppProgress": { "Package": "RcppProgress", @@ -367,28 +371,28 @@ }, "V8": { "Package": "V8", - "Version": "4.4.2", + "Version": "5.0.0", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "Rcpp", "curl", "jsonlite", "utils" ], - "Hash": "ca98390ad1cef2a5a609597b49d3d042" + "Hash": "9eb7b2df315593e726b029200fc0276c" }, "abind": { "Package": "abind", - "Version": "1.4-5", + "Version": "1.4-8", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "methods", "utils" ], - "Hash": "4f57884290cc75ab22f4af9e9d4ca862" + "Hash": "2288423bb0f20a457800d7fc47f6aa54" }, "ape": { "Package": "ape", @@ -451,9 +455,9 @@ }, "bayestestR": { "Package": "bayestestR", - "Version": "0.13.2", + "Version": "0.14.0.2", "Source": "Repository", - "Repository": "CRAN", + "Repository": "https://easystats.r-universe.dev", "Requirements": [ "R", "datawizard", @@ -463,7 +467,7 @@ "stats", "utils" ], - "Hash": 
"4a6a2eebe2db1dfb1c792c4ed91e73dc" + "Hash": "b52bd606aceefe77322b341f96696f2c" }, "beeswarm": { "Package": "beeswarm", @@ -533,10 +537,10 @@ }, "bitops": { "Package": "bitops", - "Version": "1.0-7", + "Version": "1.0-8", "Source": "Repository", - "Repository": "CRAN", - "Hash": "b7d8d8ee39869c18d8846a184dd8a1af" + "Repository": "RSPM", + "Hash": "da69e6b6f8feebec0827205aad3fdbd8" }, "blastula": { "Package": "blastula", @@ -577,6 +581,23 @@ ], "Hash": "40415719b5a479b87949f3aa0aee737c" }, + "bookdown": { + "Package": "bookdown", + "Version": "0.40", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R", + "htmltools", + "jquerylib", + "knitr", + "rmarkdown", + "tinytex", + "xfun", + "yaml" + ], + "Hash": "896a79478a50c78fb035a37148638f4e" + }, "boot": { "Package": "boot", "Version": "1.3-30", @@ -669,7 +690,7 @@ }, "bslib": { "Package": "bslib", - "Version": "0.7.0", + "Version": "0.8.0", "Source": "Repository", "Repository": "RSPM", "Requirements": [ @@ -687,7 +708,7 @@ "rlang", "sass" ], - "Hash": "8644cc53f43828f19133548195d7e59e" + "Hash": "b299c6741ca9746fb227debcb0f9fb6c" }, "cachem": { "Package": "cachem", @@ -786,14 +807,14 @@ }, "cli": { "Package": "cli", - "Version": "3.6.2", + "Version": "3.6.3", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "utils" ], - "Hash": "1216ac65ac55ec0058a6f75d7ca0fd52" + "Hash": "b21916dd77a27642b447374a5d30ecf3" }, "clipr": { "Package": "clipr", @@ -858,9 +879,9 @@ }, "colorspace": { "Package": "colorspace", - "Version": "2.1-0", + "Version": "2.1-1", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "grDevices", @@ -868,7 +889,7 @@ "methods", "stats" ], - "Hash": "f20c47fd52fae58b4e377c37bb8c335b" + "Hash": "d954cb1c57e8d8b756165d7ba18aa55a" }, "commonmark": { "Package": "commonmark", @@ -892,9 +913,9 @@ }, "correlation": { "Package": "correlation", - "Version": "0.8.4", + "Version": "0.8.5.1", "Source": 
"Repository", - "Repository": "CRAN", + "Repository": "https://easystats.r-universe.dev", "Requirements": [ "R", "bayestestR", @@ -904,7 +925,7 @@ "parameters", "stats" ], - "Hash": "d8bd29a9abda6eed9aaab3ba5769f231" + "Hash": "3ae4d288f3314e6d24b63724ecb26de7" }, "corrplot": { "Package": "corrplot", @@ -932,25 +953,25 @@ }, "cpp11": { "Package": "cpp11", - "Version": "0.4.7", + "Version": "0.5.0", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R" ], - "Hash": "5a295d7d963cc5035284dcdbaf334f4e" + "Hash": "91570bba75d0c9d3f1040c835cee8fba" }, "crayon": { "Package": "crayon", - "Version": "1.5.2", + "Version": "1.5.3", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "grDevices", "methods", "utils" ], - "Hash": "e8a1e41acf02548751f45c718d55aa6a" + "Hash": "859d96e65ef198fd43e82b9628d593ef" }, "credentials": { "Package": "credentials", @@ -968,28 +989,28 @@ }, "curl": { "Package": "curl", - "Version": "5.2.1", + "Version": "5.2.2", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R" ], - "Hash": "411ca2c03b1ce5f548345d2fc2685f7a" + "Hash": "8f27335f2bcff4d6035edcc82d7d46de" }, "data.table": { "Package": "data.table", - "Version": "1.15.4", + "Version": "1.16.0", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "R", "methods" ], - "Hash": "8ee9ac56ef633d0c7cab8b2ca87d683e" + "Hash": "fb24e05d4a91d8b1c7ff8e284bde834a" }, "datawizard": { "Package": "datawizard", - "Version": "0.11.0", + "Version": "0.12.2", "Source": "Repository", "Repository": "CRAN", "Requirements": [ @@ -998,7 +1019,7 @@ "stats", "utils" ], - "Hash": "b99dd5378fefe0f87f44fd1723b9c71e" + "Hash": "4070785c656b9ded5d0ed5db387a96d5" }, "dbplyr": { "Package": "dbplyr", @@ -1129,14 +1150,14 @@ }, "digest": { "Package": "digest", - "Version": "0.6.35", + "Version": "0.6.37", "Source": "Repository", "Repository": "RSPM", "Requirements": [ "R", "utils" ], - "Hash": 
"698ece7ba5a4fa4559e3d537e7ec3d31" + "Hash": "33698c4b3127fc9f506654607fb73676" }, "doFuture": { "Package": "doFuture", @@ -1170,7 +1191,7 @@ }, "downlit": { "Package": "downlit", - "Version": "0.4.3", + "Version": "0.4.4", "Source": "Repository", "Repository": "CRAN", "Requirements": [ @@ -1186,7 +1207,7 @@ "withr", "yaml" ], - "Hash": "14fa1f248b60ed67e1f5418391a17b14" + "Hash": "45a6a596bf0108ee1ff16a040a2df897" }, "dplyr": { "Package": "dplyr", @@ -1232,7 +1253,7 @@ }, "effectsize": { "Package": "effectsize", - "Version": "0.8.8", + "Version": "0.8.9", "Source": "Repository", "Repository": "CRAN", "Requirements": [ @@ -1245,7 +1266,7 @@ "stats", "utils" ], - "Hash": "6709e156cf7869d95d8fd6e365337d2c" + "Hash": "7aceb5e07b6d48171c6b56714cc305ea" }, "ellipsis": { "Package": "ellipsis", @@ -1478,9 +1499,9 @@ }, "geometry": { "Package": "geometry", - "Version": "0.4.7", + "Version": "0.5.0", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "Rcpp", @@ -1489,7 +1510,7 @@ "lpSolve", "magic" ], - "Hash": "8e5ba8a115dee2730bab618934db4b85" + "Hash": "b052bd270aeddeca332c20feecfb039d" }, "gert": { "Package": "gert", @@ -2008,9 +2029,9 @@ }, "gt": { "Package": "gt", - "Version": "0.10.1", + "Version": "0.11.0", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "base64enc", @@ -2034,7 +2055,29 @@ "vctrs", "xml2" ], - "Hash": "03009c105dfae79460b8eb9d8cf791e4" + "Hash": "3470c2eb1123db6a2c54ec812de38284" + }, + "gtExtras": { + "Package": "gtExtras", + "Version": "0.5.0", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R", + "cli", + "commonmark", + "dplyr", + "fontawesome", + "ggplot2", + "glue", + "gt", + "htmltools", + "knitr", + "paletteer", + "rlang", + "scales" + ], + "Hash": "654cdd2db0d2d1c2ab4ae8ee8af63168" }, "gtable": { "Package": "gtable", @@ -2316,16 +2359,16 @@ }, "insight": { "Package": "insight", - "Version": "0.20.1", + "Version": "0.20.4", 
"Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "methods", "stats", "utils" ], - "Hash": "9e968fb16772af41ca88ac3d51b09743" + "Hash": "8457d6e682a49f2c87b698a830527b09" }, "ipred": { "Package": "ipred", @@ -2452,9 +2495,9 @@ }, "knitr": { "Package": "knitr", - "Version": "1.47", + "Version": "1.48", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "evaluate", @@ -2464,7 +2507,7 @@ "xfun", "yaml" ], - "Hash": "7c99b2d55584b982717fcc0950378612" + "Hash": "acf380f300c721da9fde7df115a5f86f" }, "labeling": { "Package": "labeling", @@ -2610,9 +2653,9 @@ }, "lme4": { "Package": "lme4", - "Version": "1.1-35.3", + "Version": "1.1-35.5", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "MASS", "Matrix", @@ -2632,14 +2675,14 @@ "stats", "utils" ], - "Hash": "862f9d995f528f3051f524791955b20c" + "Hash": "16a08fc75007da0d08e0c0388c7c33e6" }, "lpSolve": { "Package": "lpSolve", - "Version": "5.6.20", + "Version": "5.6.21", "Source": "Repository", - "Repository": "CRAN", - "Hash": "2801c8082e89ed84cc0dbe43de850d31" + "Repository": "RSPM", + "Hash": "730a90bdc519fb0caff03df11218ddd8" }, "lubridate": { "Package": "lubridate", @@ -2788,13 +2831,13 @@ }, "minqa": { "Package": "minqa", - "Version": "1.2.7", + "Version": "1.2.8", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "Rcpp" ], - "Hash": "aba060ef3c097b26a4d304ea39d87f32" + "Hash": "785ef8e22389d4a7634c6c944f2dc07d" }, "modelbased": { "Package": "modelbased", @@ -2943,13 +2986,10 @@ }, "nloptr": { "Package": "nloptr", - "Version": "2.0.3", + "Version": "2.1.1", "Source": "Repository", - "Repository": "CRAN", - "Requirements": [ - "testthat" - ], - "Hash": "277c67a08f358f42b6a77826e4492f79" + "Repository": "RSPM", + "Hash": "27550641889a3abf3aec4d91186311ec" }, "nnet": { "Package": "nnet", @@ -2995,13 +3035,13 @@ }, "openssl": { "Package": "openssl", - 
"Version": "2.2.0", + "Version": "2.2.1", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "askpass" ], - "Hash": "2bcca3848e4734eb3b16103bc9aa4b8e" + "Hash": "c62edf62de70cadf40553e10c739049d" }, "orchaRd": { "Package": "orchaRd", @@ -3035,6 +3075,20 @@ ], "Hash": "94355af60a14c04da0d79972a1165bc3" }, + "paletteer": { + "Package": "paletteer", + "Version": "1.6.0", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R", + "prismatic", + "rematch2", + "rlang", + "rstudioapi" + ], + "Hash": "ac364dee7ff8277fb4144fcc1e26302f" + }, "parallelly": { "Package": "parallelly", "Version": "1.37.1", @@ -3049,9 +3103,9 @@ }, "parameters": { "Package": "parameters", - "Version": "0.21.7", + "Version": "0.22.1.7", "Source": "Repository", - "Repository": "CRAN", + "Repository": "https://easystats.r-universe.dev", "Requirements": [ "R", "bayestestR", @@ -3062,7 +3116,7 @@ "stats", "utils" ], - "Hash": "9f8ce76f256b193e1536c827c7adc125" + "Hash": "1bf7ea65b98b136e87ad9c09e6d46c51" }, "parsnip": { "Package": "parsnip", @@ -3142,9 +3196,9 @@ }, "performance": { "Package": "performance", - "Version": "0.12.0", + "Version": "0.12.2", "Source": "Repository", - "Repository": "CRAN", + "Repository": "https://easystats.r-universe.dev", "Requirements": [ "R", "bayestestR", @@ -3153,7 +3207,7 @@ "stats", "utils" ], - "Hash": "dc72bffc62740d81375f740223e8dc19" + "Hash": "6d0d9762077b82869a0ea9b17787b01e" }, "permute": { "Package": "permute", @@ -3253,24 +3307,25 @@ }, "pkgload": { "Package": "pkgload", - "Version": "1.3.4", + "Version": "1.4.0", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "cli", - "crayon", "desc", "fs", "glue", + "lifecycle", "methods", "pkgbuild", + "processx", "rlang", "rprojroot", "utils", "withr" ], - "Hash": "876c618df5ae610be84356d5d7a5d124" + "Hash": "2ec30ffbeec83da57655b850cf2d3e0e" }, "plyr": { "Package": "plyr", @@ -3351,6 +3406,19 @@ ], "Hash": 
"6b01fc98b1e86c4f705ce9dcfd2f57c7" }, + "prismatic": { + "Package": "prismatic", + "Version": "1.1.2", + "Source": "Repository", + "Repository": "CRAN", + "Requirements": [ + "R", + "farver", + "grDevices", + "graphics" + ], + "Hash": "51967d2e55a523791ae22832e86209ae" + }, "processx": { "Package": "processx", "Version": "3.8.4", @@ -3442,14 +3510,14 @@ }, "ps": { "Package": "ps", - "Version": "1.7.6", + "Version": "1.8.0", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "utils" ], - "Hash": "dd2b9319ee0656c8acf45c7f40c59de7" + "Hash": "4b9c8485b0c7eecdf0a9ba5132a45576" }, "purrr": { "Package": "purrr", @@ -3540,13 +3608,13 @@ }, "reactR": { "Package": "reactR", - "Version": "0.5.0", + "Version": "0.6.1", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "htmltools" ], - "Hash": "c9014fd1a435b2d790dd506589cb24e5" + "Hash": "b8e3d93f508045812f47136c7c44c251" }, "reactable": { "Package": "reactable", @@ -3710,9 +3778,9 @@ }, "rmarkdown": { "Package": "rmarkdown", - "Version": "2.27", + "Version": "2.28", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "bslib", @@ -3729,7 +3797,7 @@ "xfun", "yaml" ], - "Hash": "27f9502e1cdbfa195f94e03b0f517484" + "Hash": "062470668513dcda416927085ee9bdc7" }, "roxygen2": { "Package": "roxygen2", @@ -3924,9 +3992,9 @@ }, "see": { "Package": "see", - "Version": "0.8.4", + "Version": "0.8.5", "Source": "Repository", - "Repository": "CRAN", + "Repository": "https://easystats.r-universe.dev", "Requirements": [ "R", "bayestestR", @@ -3942,7 +4010,7 @@ "performance", "stats" ], - "Hash": "3d2fd0b72314499e6af4fd20d39309dc" + "Hash": "bf62c13d7444a645fa6cfb7a54cdcdd3" }, "selectr": { "Package": "selectr", @@ -4459,13 +4527,13 @@ }, "tinytex": { "Package": "tinytex", - "Version": "0.51", + "Version": "0.52", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "xfun" ], - 
"Hash": "d44e2fcd2e4e076f0aac540208559d1d" + "Hash": "cfbad971a71f0e27cec22e544a08bc3b" }, "tokenizers": { "Package": "tokenizers", @@ -4597,13 +4665,13 @@ }, "uuid": { "Package": "uuid", - "Version": "1.2-0", + "Version": "1.2-1", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R" ], - "Hash": "303c19bfd970bece872f93a824e323d9" + "Hash": "34e965e62a41fcafb1ca60e9b142085b" }, "vctrs": { "Package": "vctrs", @@ -4621,9 +4689,9 @@ }, "vegan": { "Package": "vegan", - "Version": "2.6-6.1", + "Version": "2.6-8", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "MASS", "R", @@ -4632,7 +4700,7 @@ "mgcv", "permute" ], - "Hash": "46a520e3fd3286168c3e4dc5e6fbb5b1" + "Hash": "1cf04791d6222288c30d3c7d887da1ea" }, "vipor": { "Package": "vipor", @@ -4719,21 +4787,20 @@ }, "waldo": { "Package": "waldo", - "Version": "0.5.2", + "Version": "0.5.3", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "cli", "diffobj", - "fansi", "glue", "methods", "rematch2", "rlang", "tibble" ], - "Hash": "c7d3fd6d29ab077cbac8f0e2751449e6" + "Hash": "16aa934a49658677d8041df9017329b9" }, "warp": { "Package": "warp", @@ -4754,15 +4821,15 @@ }, "withr": { "Package": "withr", - "Version": "3.0.0", + "Version": "3.0.1", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ "R", "grDevices", "graphics" ], - "Hash": "d31b6c62c10dcf11ec530ca6b0dd5d35" + "Hash": "07909200e8bbe90426fbfeb73e1e27aa" }, "workflows": { "Package": "workflows", @@ -4816,15 +4883,16 @@ }, "xfun": { "Package": "xfun", - "Version": "0.44", + "Version": "0.47", "Source": "Repository", - "Repository": "CRAN", + "Repository": "RSPM", "Requirements": [ + "R", "grDevices", "stats", "tools" ], - "Hash": "317a0538d32f4a009658bcedb7923f4b" + "Hash": "36ab21660e2d095fef0d83f689e0477c" }, "xml2": { "Package": "xml2", @@ -4864,10 +4932,10 @@ }, "yaml": { "Package": "yaml", - "Version": 
"2.3.8", + "Version": "2.3.10", "Source": "Repository", - "Repository": "CRAN", - "Hash": "29240487a071f535f5e5d5a323b7afbd" + "Repository": "RSPM", + "Hash": "51dab85c6c98e50a18d7551e9d49f76c" }, "yardstick": { "Package": "yardstick", diff --git a/stylefile.css b/stylefile.css new file mode 100644 index 0000000..04452a6 --- /dev/null +++ b/stylefile.css @@ -0,0 +1,15 @@ +caption, .table-caption { + margin: auto; + text-align: left; +} + +figure.quarto-float-tbl figcaption.quarto-float-caption-top { + text-align: left; + margin: auto; +} + +/* works for figure caption alignment as intended */ +/*figcaption { +/* margin: auto; +/* text-align: left; +/*} diff --git a/supp_mat/SM1_summary.qmd b/supp_mat/SM1_summary.qmd index 769aa40..cee251a 100644 --- a/supp_mat/SM1_summary.qmd +++ b/supp_mat/SM1_summary.qmd @@ -1,11 +1,10 @@ --- title: "Summarising Variation Among Analysis Specifications" -format: html +format: + html: + code-fold: true + echo: true editor: visual -code-fold: true -author: - - name: Elliot Gould - - name: Hannah S Fraser execute: freeze: auto # re-render only when source changes bibliography: ../ms/references.bib @@ -13,6 +12,7 @@ number-sections: true tbl-cap-location: top editor_options: chunk_output_type: console +pre-render: "utils.R" --- ```{r load-libs, include=TRUE,eval=TRUE,message=FALSE} @@ -32,6 +32,7 @@ library(ggh4x) library(showtext) set.seed(1234) +source(here::here("utils.R")) # extrafont::font_install("Lato") ``` @@ -39,25 +40,25 @@ set.seed(1234) # ----- Using complete objects in 'ManyEcoEvo' ----- Table1 <- # Teams, analyses and model types - ManyEcoEvo::ManyEcoEvo_study_summary %>% + ManyEcoEvo_study_summary %>% filter(subset_name != "all") %>% select(subset_name, model_type_summary) %>% unnest(cols = model_type_summary) Table2 <- # model composition - ManyEcoEvo::ManyEcoEvo_study_summary %>% + ManyEcoEvo_study_summary %>% filter(subset_name != "all") %>% select(subset_name, model_term_summary) %>% unnest(cols = 
model_term_summary) Table3 <- # choice of variables - ManyEcoEvo::ManyEcoEvo_study_summary %>% + ManyEcoEvo_study_summary %>% filter(subset_name != "all") %>% select(subset_name, variable_count_summary ) %>% unnest(cols = variable_count_summary) var_names <- - ManyEcoEvo::ManyEcoEvo %>% + ManyEcoEvo %>% pull(diversity_data) %>% map(~ .x %>% select(-id_col, -dataset) %>% colnames) %>% @@ -107,7 +108,7 @@ The composition of models varied substantially (@tbl-Table2) in regards to the n ```{r} #| label: tbl-Table2 -#| tbl-cap: "Mean, standard deviation and range of number of fixed and random variables and interaction terms used in models and sample size used. Repeated for effect size analyses only ($Z_r$) and out-of-sample prediction only ($y_i$)." +#| tbl-cap: "Mean, standard deviation and range of number of fixed and random variables, interaction terms used in models and analysis sample size (*N*). Repeated for effect-size analyses only ($Z_r$) and out-of-sample predictions only ($y_i$)." Table2 %>% rename(SD = sd, subset = subset_name) %>% @@ -117,18 +118,30 @@ Table2 %>% names_sep = ".", values_from = c(mean, SD, min, max) ) %>% - mutate(variable = case_when(variable == "samplesize" ~ "*N*", + mutate(variable = case_when(variable == "samplesize" ~ "N", TRUE ~ variable)) %>% gt::gt(rowname_col = "subset") %>% - gt::row_group_order(groups = c("fixed", "random", "interactions", "*N*")) %>% + gt::row_group_order(groups = c("fixed", "random", "interactions", "N")) %>% gt::tab_spanner_delim(delim = ".") %>% - gt::fmt_scientific(columns = c(contains("mean"), contains("SD")), + gt::fmt_scientific(columns = "mean.blue tit", + rows = `mean.blue tit` < 0.01, + decimals = 2) %>% + gt::fmt_scientific(columns = "SD.blue tit", + rows = `SD.blue tit` < 0.01, + decimals = 2) %>% + gt::fmt_scientific(columns = "mean.eucalyptus", + rows = `mean.eucalyptus` < 0.01, + decimals = 2) %>% + gt::fmt_scientific(columns = "SD.eucalyptus", + rows = `SD.eucalyptus` < 0.01, decimals = 2) %>% 
gt::cols_label_with(fn = Hmisc::capitalize) %>% gt::tab_style( style = gt::cell_text(transform = "capitalize"), locations = gt::cells_column_spanners() ) %>% + gt::tab_style(style = gt::cell_text(transform = "capitalize"),locations = cells_row_groups()) %>% +gt::tab_style(style = gt::cell_text(style = "italic"), locations = cells_row_groups(groups = "N")) %>% gt::cols_label_with(c(contains("Eucalyptus")), fn = ~ gt::md(paste0("*",.x, "*"))) %>% gt::sub_values(columns = subset, values = c("effects"), @@ -147,7 +160,7 @@ The choice of variables also differed substantially among analyses (@tbl-Table3) ```{r} #| label: tbl-Table3 -#| tbl-cap: "Mean, SD, minimum and maximum number of analyses in which each variable was used, for effect size analyses only ($Z_r$), out-of-sample prediction only ($y_i$), using the full dataset." +#| tbl-cap: "Mean, $\\text{SE}$, minimum and maximum number of analyses in which each variable was used, for effect size analyses only ($Z_r$), out-of-sample prediction only ($y_i$), using the full dataset." 
#table 3 - summary of mean, sd and range for the number of analyses in which each variable was used Table3 %>% rename(SD = sd, subset = subset_name) %>% @@ -159,8 +172,19 @@ Table3 %>% ungroup %>% gt::gt(rowname_col = "subset") %>% gt::tab_spanner_delim(delim = ".") %>% - gt::fmt_scientific(columns = c(contains("mean"), contains("SD")), + gt::fmt_scientific(columns = "mean.blue tit", + rows = `mean.blue tit` < 0.01, + decimals = 2) %>% + gt::fmt_scientific(columns = "SD.blue tit", + rows = `SD.blue tit` < 0.01, decimals = 2) %>% + gt::fmt_scientific(columns = "mean.eucalyptus", + rows = `mean.eucalyptus` < 0.01, + decimals = 2) %>% + gt::fmt_scientific(columns = "SD.eucalyptus", + rows = `SD.eucalyptus` < 0.01, + decimals = 2) %>% + gt::fmt_number(decimals = 2,drop_trailing_zeros = T, drop_trailing_dec_mark = T) %>% gt::cols_label_with(fn = Hmisc::capitalize) %>% gt::cols_label_with(c(contains("Eucalyptus")), fn = ~ gt::md(paste0("*",.x, "*"))) %>% gt::sub_values(columns = subset, values = c("effects"), @@ -173,7 +197,8 @@ Table3 %>% gt::tab_style( style = gt::cell_text(transform = "capitalize"), locations = gt::cells_column_spanners() - ) + ) %>% + gt::as_raw_html() ``` ## Effect Size Specification Analysis @@ -187,6 +212,7 @@ We observed few clear trends in the blue tit specification curve (@fig-specr-bt) ```{r calc_MA_mod_coefs-2, eval=TRUE, cache = FALSE, eval = TRUE, warning=FALSE, message = FALSE} # knitr::read_chunk(here::here("index.qmd"), labels = "calc_MA_mod_coefs") #TODO why is here?? 
+ coefs_MA_mod <- bind_rows( ManyEcoEvo_viz %>% filter(model_name == "MA_mod", exclusion_set == "complete", @@ -285,7 +311,6 @@ forest_plot_new_labels <- forest_plot_new_labels %>% ```{r} #| label: fig-specr-bt -#| echo: true #| warning: false #| message: false #| fig-height: 15 @@ -386,7 +411,6 @@ In the *Eucalyptus* specification curve, there are no strong trends (@fig-specr- #| fig-align: center #| message: false #| warning: false -#| echo: true #| fig-width: 12 #| fig-height: 15 #| fig-cap: "**A.** Forest plot for *Eucalyptus* analyses: standardized effect-sizes (circles) and their 95% confidence intervals are displayed for each analysis included in the meta-analysis model. The meta-analytic mean effect-size is denoted by a black diamond, with error bars also representing the 95% confidence interval. The dashed black line demarcates effect sizes of 0, whereby no effect of the test variable on the response variable is found. Blue points where $Z_r$ and its associated confidence intervals are greater than 0 indicate analyses that found a positive relationship of grass cover on *Eucalyptus* seedling success. Gray coloured points have confidence intervals crossing 0, indicating no relationship between the test and response variable. Red points indicate the analysis found a negative relationship between grass cover and *Eucalyptus seedling success*. **B.** Analysis specification plot: for each analysis plotted in A, the corresponding combination of analysis decisions is plotted. Each decision and its alternative choices is grouped into its own facet, with the decision point described on the right of the panel, and the option shown on the left. Lines indicate the option chosen used in the corresponding point in plot A. **C.** Sample sizes of each analysis. Note that empty bars indicate analyst did not report sample size and sample size could not be derived by lead team." 
diff --git a/supp_mat/SM2_EffectSizeAnalysis.qmd b/supp_mat/SM2_EffectSizeAnalysis.qmd index 4eeaabf..2558078 100644 --- a/supp_mat/SM2_EffectSizeAnalysis.qmd +++ b/supp_mat/SM2_EffectSizeAnalysis.qmd @@ -1,10 +1,13 @@ --- title: "Effect Size Analysis" # lib-dir: "renv/library/R-4.4/aarch64-apple-darwin20/" -format: html +format: + html: + code-fold: true + echo: true editor: visual number-sections: true -code-fold: true +pre-render: "utils.R" execute: freeze: auto # re-render only when source changes --- @@ -16,6 +19,7 @@ library(tidyverse) library(performance) library(broom.mixed) library(gt) +library(gtExtras) library(lme4) library(MuMIn) library(ManyEcoEvo) @@ -23,6 +27,7 @@ library(ggrepel) library(glue) library(gluedown) set.seed(1234) +source(here::here("utils.R")) ``` ```{r} @@ -54,9 +59,9 @@ ManyEcoEvo_results <- #### Effect of categorical review rating -The figures below (@fig-euc-cat-ratings-MA,@fig-bt-cat-ratings-MA) shows the fixed effect of categorical review rating on deviation from the meta-analytic mean. There is very little difference in deviation for analyses in any of the review categories. It is worth noting that each analysis features multiple times in these figures corresponding to the multiple reviewers that provided ratings. +The figures below (@fig-euc-cat-ratings-MA, @fig-bt-cat-ratings-MA) shows the fixed effect of categorical review rating on deviation from the meta-analytic mean. There is very little difference in deviation for analyses in any of the review categories. It is worth noting that each analysis features multiple times in these figures corresponding to the multiple reviewers that provided ratings. -```{r fig-euc-cat-ratings-MA, echo = FALSE, warning = FALSE, message = FALSE, fig.cap = "Orchard plot of meta-analytic model fitted to all eucalyptus analyses with a fixed effect for categorical peer-review ratings, and random effects for analyst ID and reviewer ID. 
Black circles denote coefficient mean for each categorical publishability rating. Thick error bars represent 95% confidence intervals and whiskers indicate 95% prediction intervals. Effect sizes are represented by circles and their size corresponds to the precision of the estimate.", fig.height=5, fig.width=8} +```{r fig-euc-cat-ratings-MA, echo = TRUE, warning = FALSE, message = FALSE, fig.cap = "Orchard plot of meta-analytic model fitted to all *Eucalyptus* analyses with a fixed effect for categorical peer-review ratings, and random effects for analyst ID and reviewer ID. Black circles denote coefficient mean for each categorical publishability rating. Thick error bars represent 95% confidence intervals and whiskers indicate 95% prediction intervals. Effect sizes are represented by circles and their size corresponds to the precision of the estimate.", fig.height=5, fig.width=8} orchard_publishability <- function(dat){ rma_mod_rating <- metafor::rma.mv(yi = Zr, @@ -87,7 +92,7 @@ ManyEcoEvo_results$effects_analysis[[2]] %>% scale_x_discrete(labels=c("Deeply Flawed\n & Unpublishable", "Publishable With\n Major Revision", "Publishable With\n Minor Revision", "Publishable\n As Is")) ``` -```{r fig-bt-cat-ratings-MA, echo = FALSE, warning = FALSE, message = FALSE, fig.cap = "Orchard plot of meta-analytic model fitted to all blue tit analyses with a fixed effect for categorical peer-review ratings, and random effects for analyst ID and reviewer ID. Black circles denote coefficient mean for each categorical publishability rating. Thick error bars represent 95% confidence intervals and whiskers indicate 95% prediction intervals. 
Effect sizes are represented by circles and their size corresponds to the precision of the estimate.", fig.height=5, fig.width=8} +```{r fig-bt-cat-ratings-MA, echo = TRUE, warning = FALSE, message = FALSE, fig.cap = "Orchard plot of meta-analytic model fitted to all blue tit analyses with a fixed effect for categorical peer-review ratings, and random effects for analyst ID and reviewer ID. Black circles denote coefficient mean for each categorical publishability rating. Thick error bars represent 95% confidence intervals and whiskers indicate 95% prediction intervals. Effect sizes are represented by circles and their size corresponds to the precision of the estimate.", fig.height=5, fig.width=8} ManyEcoEvo_results$effects_analysis[[1]] %>% # filter(Zr > -4) %>% @@ -112,7 +117,6 @@ The forest plots in @fig-all-forest-plots-Zr compare the distributions of $Z_r$ #| column: body-outset-right #| fig-cap: 'Forest plots of meta-analytic estimated standardized effect sizes ($Z_r$, blue circles) and their 95% confidence intervals for each effect size included in the meta-analysis model. The meta-analytic mean effect size is denoted by a black triangle and a dashed vertical line, with error bars also representing the 95% confidence interval. The solid black vertical line demarcates effect size of 0, indicating no relationship between the test variable and the response variable. The left side of each panel shows the analysis team names (anonymous arbitrary names assigned by us), each followed by three numbers. The first number is the submission ID (some analyst teams submitted results to us on >1 submission form), the second number is the analysis ID (some analyst teams included results of >1 analysis in a given submission), and the third number is the effect ID (some analysts submitted values for >1 effect per analysis). Thus, each row in each forest plot is uniquely identified, but it is possible to determine which effects come from which analyses and which analysis teams. 
The plots in the top row depict effects from analyses of blue tit data, and the bottom row plots depict effects from analyses of Eucalyptus data. The right-most plots depict all usable effect sizes. The plots on the left exclude effects from analysis sets that received at least one rating of “unpublishable” from peer reviewers, and the plots in the middle exclude effects from analysis sets that received at least one rating of either “unpublishable” or “major revision” from peer reviewers.' #| message: false -#| echo: false #| fig-height: 14 #| fig-width: 10 # TeamIdentifier_lookup <- read_csv(here::here("data-raw/metadata_and_key_data/TeamIdentifierAnonymised.csv")) @@ -243,26 +247,21 @@ publishable_subsets_forest_data %>% #### Post-hoc analysis: Exploring the effect of excluding estimates in which we had reduced confidence -For each dataset (blue tit, Eucalyptus), we created a second, more conservative version, that excluded effects based on estimates of $df$ that we considered less reliable (@tbl-Zr-exclusion-subsetting). We compared the outcomes of analyses of the primary dataset (constituted according to our registered plan) with the outcomes of analyses of the more conservative version of the dataset. We also compared results from analyses of both of these versions of the dataset to versions with our post-hoc removal of outliers described in the main text. Our more conservative exclusions (based on unreliable estimates of $df$) had minimal impact on the meta-analytic mean for both blue tit and Eucalyptus analyses, regardless of whether outliers were excluded (@tbl-Zr-exclusion-subsetting). +For each dataset (blue tit, Eucalyptus), we created a second, more conservative version, that excluded effects based on estimates of $\mathit{df}$ that we considered less reliable (@tbl-Zr-exclusion-subsetting). 
We compared the outcomes of analyses of the primary dataset (constituted according to our registered plan) with the outcomes of analyses of the more conservative version of the dataset. We also compared results from analyses of both of these versions of the dataset to versions with our post-hoc removal of outliers described in the main text. Our more conservative exclusions (based on unreliable estimates of $\mathit{df}$ ) had minimal impact on the meta-analytic mean for both blue tit and Eucalyptus analyses, regardless of whether outliers were excluded (@tbl-Zr-exclusion-subsetting). ```{r} #| label: tbl-Zr-exclusion-subsetting -#| tbl-cap: "Estimated meta-analytic mean, standard error, and 95% confidence intervals, from analyses of the primary data set, the more conservative version of the dataset which excluded effects based on less reliable estimates of $df$, and both of these datasets with outliers removed." +#| tbl-cap: "Estimated meta-analytic mean, standard error, and 95% confidence intervals, from analyses of the primary data set, the more conservative version of the dataset which excluded effects based on less reliable estimates of $\\mathit{df}$, and both of these datasets with outliers removed." 
ManyEcoEvo_viz %>% dplyr::filter(estimate_type == "Zr", model_name == "MA_mod", - collinearity_subset != "collinearity_removed") %>% - hoist(tidy_mod_summary) %>% - unnest(tidy_mod_summary) %>% - filter(publishable_subset == "All", expertise_subset == "All") %>% - select(-publishable_subset, -expertise_subset) %>% - select(dataset, - exclusion_set, - estimate, - std.error, - statistic, - p.value, - starts_with("conf")) %>% + collinearity_subset != "collinearity_removed", + publishable_subset == "All", + expertise_subset == "All") %>% + select(dataset, exclusion_set, tidy_mod_summary) %>% + unnest(tidy_mod_summary) %>% + filter(type == "summary") %>% + select(-term, -type) %>% mutate(exclusion_set = case_when(exclusion_set == "complete" ~ "Primary dataset", @@ -278,7 +277,7 @@ group_by(exclusion_set) %>% fns = function(x) gtsummary::style_pvalue(x, prepend_p = FALSE)) %>% gt::fmt_number(columns = c(-p.value, -dataset)) %>% gt::cols_label(estimate = gt::md("$$\\hat\\mu$$"), - std.error = gt::md("$$SE[\\hat\\mu]$$"), + std.error = gt::md("$$\\text{SE}[\\hat\\mu]$$"), conf.low = gt::md("95\\%CI")) %>% gt::cols_merge(columns = starts_with("conf"), pattern = "[{1},{2}]") %>% @@ -334,7 +333,6 @@ plot_forest <- function(data, intercept = TRUE, MA_mean = TRUE ){ ```{r} #| label: euc-Zr-outliers-sm -#| echo: true #| warning: false #| message: false complete_euc_data <- @@ -343,22 +341,21 @@ complete_euc_data <- estimate_type == "Zr", model_name == "MA_mod", dataset == "eucalyptus", - publishable_subset == "All") %>% - select(model) %>% - mutate(plot_data = map(model, - .f = ~ broom::tidy(.x, - conf.int = TRUE, - include_studies = TRUE) %>% - dplyr::mutate(point_shape = - ifelse(stringr::str_detect(term, "overall"), - "diamond", - "circle"), - Parameter = - forcats::fct_reorder(term, - estimate) %>% - forcats::fct_reorder(., - point_shape, - .desc = TRUE)) + publishable_subset == "All", + expertise_subset == "All") %>% + select(tidy_mod_summary) %>% + mutate(plot_data = 
map(tidy_mod_summary, + .f = ~ dplyr::mutate(.x, + point_shape = + ifelse(stringr::str_detect(term, "overall"), + "diamond", + "circle"), + Parameter = + forcats::fct_reorder(term, + estimate) %>% + forcats::fct_reorder(., + point_shape, + .desc = TRUE)) ), meta_analytic_mean = map_dbl(plot_data, ~ filter(.x, Parameter == "overall") %>% @@ -368,16 +365,14 @@ complete_euc_data <- mutate(parameter_type = case_when(str_detect(Parameter, "overall") ~ "mean", TRUE ~ "study")) -# complete_euc_data <- -# complete_euc_data %>% -# rename(id_col = term) %>% -# group_by(type) %>% -# group_split() %>% -# set_names(., complete_euc_data$type %>% unique) %>% -# # map_if(.x = ., names(.) == "study", -# # .f = ~ anonymise_teams(.x, TeamIdentifier_lookup)) %>% -# bind_rows() %>% -# rename(term = id_col) + # ManyEcoEvo_viz %>% + # filter(exclusion_set == "complete", + # estimate_type == "Zr", + # model_name == "MA_mod", + # dataset == "eucalyptus", + # publishable_subset == "All", + # expertise_subset == "All") %>% + # ) min_outlier_euc <- complete_euc_data %>% filter(type == "study") %>% @@ -465,21 +460,19 @@ bt_experts_only <- ManyEcoEvo_viz %>% filter(!!!filter_experts, dataset == "blue tit") %>% - select(model) %>% - mutate(plot_data = map(model, - .f = ~ broom::tidy(.x, - conf.int = TRUE, - include_studies = TRUE)%>% - dplyr::mutate(point_shape = - ifelse(stringr::str_detect(term, "overall"), - "diamond", - "circle"), - Parameter = - forcats::fct_reorder(term, - estimate) %>% - forcats::fct_reorder(., - point_shape, - .desc = TRUE)) + select(tidy_mod_summary) %>% + mutate(plot_data = map(tidy_mod_summary, + .f = ~ dplyr::mutate(.x, + point_shape = + ifelse(stringr::str_detect(term, "overall"), + "diamond", + "circle"), + Parameter = + forcats::fct_reorder(term, + estimate) %>% + forcats::fct_reorder(., + point_shape, + .desc = TRUE)) ), meta_analytic_mean = map_dbl(plot_data, ~ filter(.x, Parameter == "overall") %>% @@ -585,7 +578,6 @@ For the blue tit dataset, we created a 
subset of analyses that excluded effects #| label: fig-forest-plot-Zr-collinear-rm-subset #| fig-cap: "Forest plot of meta-analytic estimated effect-sizes $Z_{r}$, standard error and 95% confidence intervals of blue tit analyses with highly collinear analyses removed. The meta-analytic mean for the reduced subset is denoted by the black triangle, and a dashed vertical line, with error bars representing the 95% confidence interval. The solid black vertical line demarcates effect size of 0." #| fig-height: 6 -#| echo: true filter_collinear <- rlang::exprs(exclusion_set == "complete", publishable_subset == "All", @@ -598,20 +590,18 @@ filter_collinear <- rlang::exprs(exclusion_set == "complete", ManyEcoEvo_viz %>% filter(!!!filter_collinear) %>% - mutate(plot_data = map(model, - .f = ~ broom::tidy(.x, - conf.int = TRUE, - include_studies = TRUE)%>% - dplyr::mutate(point_shape = - ifelse(stringr::str_detect(term, "overall"), - "diamond", - "circle"), - Parameter = - forcats::fct_reorder(term, - estimate) %>% - forcats::fct_reorder(., - point_shape, - .desc = TRUE)) + mutate(plot_data = map(tidy_mod_summary, + .f = ~ dplyr::mutate(.x, + point_shape = + ifelse(stringr::str_detect(term, "overall"), + "diamond", + "circle"), + Parameter = + forcats::fct_reorder(term, + estimate) %>% + forcats::fct_reorder(., + point_shape, + .desc = TRUE)) ), meta_analytic_mean = map_dbl(plot_data, ~ filter(.x, Parameter == "overall") %>% @@ -638,16 +628,122 @@ ManyEcoEvo_viz %>% ### Out of sample predictions $y_i$ +#### Excluded analyses with constructed variables {#sec-excluded-yi} + +```{r} +#| label: excluded-constructed-yi +#| message: false + +by <- join_by(response_variable_name) # don't join on id_col: inc. other excl. 
+ +# Analyst Constructed Variables +all_constructed_vars <- + ManyEcoEvo %>% + pull(data, dataset) %>% + list_rbind(names_to = "dataset") %>% + filter(str_detect(response_variable_type, "constructed")) %>% + distinct(dataset,response_variable_name) %>% + drop_na() %>% + arrange() + +# Constructed Variables Included in the ManyAnalysts meta-analysis +# (i.e. we have included them in the parameter tables) +ManyEcoEvo_yi_constructed_vars <- + ManyEcoEvo:::analysis_data_param_tables %>% + distinct(variable, dataset) %>% + rename(response_variable_name = variable) %>% + semi_join(all_constructed_vars, by) %>% + filter(!str_detect(response_variable_name, + "average.proportion.of")) # was excluded + +yi_constructed <- + ManyEcoEvo_yi_results %>% + pull(data, dataset) %>% + list_rbind(names_to = "dataset") %>% + filter(str_detect(response_variable_type, "constructed")) %>% + distinct(dataset, id_col, TeamIdentifier, response_variable_name) %>% + drop_na() + +excluded_yi_constructed <- + ManyEcoEvo %>% + pull(data, dataset) %>% + list_rbind(names_to = "dataset") %>% + filter(str_detect(response_variable_type, "constructed"), + str_detect(exclusions_all, "retain")) %>% + distinct(dataset, id_col, TeamIdentifier, response_variable_name) %>% + drop_na() %>% + anti_join(yi_constructed, by) #rm response vars in yi_constructed + +n_dropped_analyses <- + excluded_yi_constructed %>% + n_distinct("id_col") + +n_teams_w_dropped_analyses <- + excluded_yi_constructed %>% + group_by(TeamIdentifier) %>% + count() %>% + n_distinct("TeamIdentifier") +``` + +We standardized the $y_i$ estimates and their standard errors for the blue tit analyses using the population mean and standard deviations of the corresponding dependent variable for that analysis, as shown in @eq-Z-VZ, using the function `ManyEcoEvo::Z_VZ_preds()`. Note that this is NOT the same process as standardizing the effect sizes $Z_r$. 
We used the mean and standard deviation of the relevant raw datasets as our 'population' parameters. + +$$ +Z_j = \frac{\mu_i-\bar{x}_j}{\text{SD}_j} \\ +\\ +{\text{VAR}}_{Z_j} = \frac{{\text{SE}}_{\mu_i}}{{\text{SD}_j}} \\ +$$ {#eq-Z-VZ} + +Where $\mu$ is the population parameter taken from our original dataset for variable $i$, and $\bar{x}_j$ and $\text{SD}_j$ are the out of sample point estimate values supplied for analysis $j$. $\text{SE}_{{\mu}_{i}}$ is the standard error of the population mean for variable $i$, while ${\text{VAR}}_{{Z}_{j}}$ and ${Z}_{j}$ are the standardized variance and mean estimate for analysis $j$. Note that for the response variables that were scaled-and-centered, or else mean-centred before model fitting, we do not need to standardise because these are already on the Z-scale. In doing so we make the assumption that analysts' data subsetting will have little effect on the outcomes. For some analyses of the blue tit dataset, analysts constructed their own unique response variables, which meant we needed to reconstruct these variables in order to calculate the population parameters. Unfortunately we were not able to re-construct all variables used by the analysts, as we were unable to reproduce the data required for their re-construction, e.g. we were unable to reproduce principal component analyses or fitted models for extracting residuals [@tbl-constructed-var-exclusions]. A total of `r n_dropped_analyses` were excluded from out-of-sample meta-analysis, from `r n_teams_w_dropped_analyses` teams, including the following analysis identifiers: `r pull(excluded_yi_constructed, id_col) %>% gluedown::md_italic() %>% glue::glue_collapse(", ",last = " and ")`. + +```{r} +#| label: tbl-constructed-var-exclusions +#| tbl-cap: "Analyst-constructed variables and their inclusion in meta-analyses of out-of-sample predictions, $y\\_i$." 
+all_constructed_vars %>% + semi_join(ManyEcoEvo_yi_constructed_vars, by) %>% + mutate(included_in_yi = TRUE) %>% + bind_rows( + { + all_constructed_vars %>% + anti_join(ManyEcoEvo_yi_constructed_vars, by) %>% + mutate(included_in_yi = FALSE) + } + ) %>% + dplyr::mutate(included_in_yi = + case_match(included_in_yi, + TRUE ~ "check", + FALSE ~ "xmark" ), + response_variable_name = + gluedown::md_code(response_variable_name)) %>% + group_by(dataset) %>% + gt::gt() %>% + gt::cols_label(response_variable_name = "Constructed Variable", + included_in_yi = gt::md("Variable reconstructed for meta-analysis?")) %>% + gt::fmt_icon(included_in_yi) %>% + gt::tab_style(style = cell_text(style = "italic", transform = "capitalize"), + locations = cells_row_groups(groups = "eucalyptus")) %>% + gt::tab_style(style = cell_text(align = "center"), + locations = cells_body(columns = included_in_yi)) %>% + gt::tab_style(style = cell_text(align = "left"), + locations = cells_body(columns = response_variable_name)) %>% + gt::tab_style(style = cell_text(align = "left"), + locations = cells_column_labels(response_variable_name)) %>% + gt::tab_style(locations = cells_body(columns = response_variable_name), + style = cell_text(size = "small")) %>% + gt::fmt_markdown(columns = response_variable_name) %>% + gt::opt_stylize(style = 6, color = "gray", add_row_striping = TRUE) %>% + gt::opt_row_striping(row_striping = TRUE) +``` + #### Non-truncated $y_{i}$ meta-analysis forest plot -Below is the non-truncated version of @fig-euc-yi-forest-plot showing a forest plot of the out-of-sample predictions, $y_{i}$, on the response-scale (stems counts), for *Eucalyptus* analyses, showing the full error bars of all model estimates. +Below is the non-truncated version of @fig-euc-yi-forest-plot showing a forest plot of the out-of-sample predictions, $y_{i}$, on the response-scale (stem counts), for *Eucalyptus* analyses, showing the full error bars of all model estimates. 
```{r} #| label: fig-euc-yi-forest-plot-full -#| fig-cap: "Forest plot of meta-analytic estimated out of sample predictions, $y_{i}$, on the response-scale (stems counts), for *Eucalyptus* analyses. Circles represent individual analysis estimates. Triangles represent the meta-analytic mean for each prediction scenario. Navy blue coloured points correspond to $y_{25}$ scenario, blue coloured points correspond to the $y_{50}$ scenario, while light blue points correspond to the $y_{75}$ scenario. Error bars are 95% confidence intervals. Outliers (observations more than 3SD above the mean) have been removed prior to model fitting." +#| fig-cap: "Forest plot of meta-analytic estimated out of sample predictions, $y_{i}$, on the response-scale (stem counts) for *Eucalyptus* analyses. Circles represent individual analysis estimates. Triangles represent the meta-analytic mean for each prediction scenario. Navy blue coloured points correspond to $y_{25}$ scenario, blue coloured points correspond to the $y_{50}$ scenario, while light blue points correspond to the $y_{75}$ scenario. Error bars are 95% confidence intervals. Outliers (i.e. observations with mean estimates more than 3SD above the population parameter mean, see @sec-excluded-yi) have been removed prior to model fitting." #| fig-height: 8 -#| echo: true #| message: false +#| fig-keep: last plot_forest_2 <- function(data, intercept = TRUE, MA_mean = TRUE, y_zoom = numeric(2L)){ if(MA_mean == FALSE){ @@ -692,6 +788,7 @@ plot_forest_2 <- function(data, intercept = TRUE, MA_mean = TRUE, y_zoom = numer if(intercept == TRUE){ p <- p + geom_hline(yintercept = 0) } + if(MA_mean == TRUE){ p <- p + geom_hline(aes(yintercept = plot_data %>% @@ -717,71 +814,30 @@ plot_forest_2 <- function(data, intercept = TRUE, MA_mean = TRUE, y_zoom = numer print(p) } -# TODO put into R/ and build into package to call! 
-fit_MA_mv <- function(effects_analysis, Z_colname, VZ_colname, estimate_type){ - Zr <- effects_analysis %>% pull({{Z_colname}}) - VZr <- effects_analysis %>% pull({{VZ_colname}}) - mod <- ManyEcoEvo::fit_metafor_mv(estimate = Zr, - variance = VZr, - estimate_type = estimate_type, - data = effects_analysis) - return(mod) -} - -back_transformed_predictions <- - ManyEcoEvo_yi %>% - dplyr::mutate(data = - purrr::map(data, - ~ dplyr::filter(.x, - stringr::str_detect(response_variable_type, "constructed", negate = TRUE)))) %>% - prepare_response_variables_yi(estimate_type = "yi", - param_table = ManyEcoEvo:::analysis_data_param_tables) %>% - generate_yi_subsets() - - -raw_mod_data_logged <- - back_transformed_predictions %>% - filter(dataset == "eucalyptus") %>% - group_by(estimate_type) %>% - select(estimate_type, data) %>% - unnest(data) %>% - rename(study_id = id_col) %>% - hoist(params, param_mean = list("value", 1), param_sd = list("value", 2)) %>% - rowwise() %>% - mutate(exclusion_threshold = param_mean + 3*param_sd) %>% - filter(fit < exclusion_threshold) %>% - mutate(log_vals = map2(fit, se.fit, log_transform, 1000)) %>% - unnest(log_vals) %>% - select(study_id, - TeamIdentifier, - estimate_type, - starts_with("response_"), - -response_id_S2, - ends_with("_log")) %>% - group_by(estimate_type) %>% - nest() - - -mod_data_logged <- raw_mod_data_logged %>% - mutate(MA_mod = - map(data, - ~fit_MA_mv(.x, mean_log, std.error_log, "yi"))) - - -plot_data_logged <- mod_data_logged %>% - mutate(tidy_mod = map(.x = MA_mod, - ~broom::tidy(.x, - conf.int = TRUE, - include_studies = TRUE) %>% - rename(study_id = term))) %>% - select(tidy_mod) %>% - unnest(cols = c(tidy_mod)) - -plot_data_logged %>% - mutate(response_scale = map2(estimate, std.error, log_back, 1000)) %>% - select(estimate_type, study_id, type, response_scale) %>% - unnest(response_scale) %>% +# ---- new code ---- + +eucalyptus_yi_plot_data <- + ManyEcoEvo_yi_viz %>% + filter(dataset == "eucalyptus", model_name 
== "MA_mod") %>% + unnest(cols = tidy_mod_summary) %>% + mutate(response_scale = list(log_back(estimate, std.error, 1000)), + .by = c(dataset, estimate_type, term, type), + .keep = "used") %>% + select(-estimate, -std.error) %>% + unnest_wider(response_scale) %>% rename(estimate = mean_origin, conf.low = lower, conf.high = upper) %>% -# filter(estimate <1000) %>% - plot_forest_2(MA_mean = T,y_zoom = c(0,140)) + nest(tidy_mod_summary = c(-dataset, -estimate_type)) %>% #extract euc data for plotting (on count scale, not log scale) + select(dataset, estimate_type, tidy_mod_summary) %>% + unnest(cols = tidy_mod_summary) %>% + rename(study_id = term) %>% + ungroup() + +max_x_axis <- + eucalyptus_yi_plot_data %>% + pluck("conf.high", max) %>% + round() + 10 + +eucalyptus_yi_plot_data %>% + plot_forest_2(MA_mean = T, y_zoom = c(0, max_x_axis)) + + theme(axis.text.y = element_blank()) ``` diff --git a/supp_mat/SM3_ExplainingDeviation.qmd b/supp_mat/SM3_ExplainingDeviation.qmd index 276f70f..423bb5f 100644 --- a/supp_mat/SM3_ExplainingDeviation.qmd +++ b/supp_mat/SM3_ExplainingDeviation.qmd @@ -1,10 +1,13 @@ --- title: "Explaining Variation in Deviation Scores" -format: html -code-fold: true +format: + html: + code-fold: true + echo: true number-sections: true execute: - freeze: false # re-render only when source changes + freeze: auto # re-render only when source changes +pre-render: "utils.R" bibliography: - ../ms/references.bib - ../ms/grateful-refs.bib @@ -24,6 +27,7 @@ library(performance) library(broom.mixed) library(gt) library(lme4) +library(parameters) #must be loaded directly else parameters fail library(MuMIn) library(ManyEcoEvo) library(tidymodels) @@ -31,32 +35,13 @@ library(multilevelmod) library(rlang) set.seed(1234) - -gt_fmt_yi <- function(gt_tbl, columns, ...) 
{ - gt_tbl %>% - gt::fmt(!!columns, - fns = function(x) str_replace(x, "y25", gt::md("$$y_{25}$$")) %>% - str_replace("y50", gt::md("$$y_{50}$$")) %>% - str_replace("y75", gt::md("$$y_{75}$$")), - ...) -} +source(here::here("utils.R")) ``` ```{r define-fns, eval = TRUE} -#TODO turn into own function and pull out of nested targets function and rm here -fit_MA_mv <- function(effects_analysis, Z_colname, VZ_colname, estimate_type){ - Zr <- effects_analysis %>% pull({{Z_colname}}) - VZr <- effects_analysis %>% pull({{VZ_colname}}) - mod <- ManyEcoEvo::fit_metafor_mv(estimate = Zr, - variance = VZr, - estimate_type = estimate_type, - data = effects_analysis) - return(mod) -} - plot_forest <- function(data, intercept = TRUE, MA_mean = TRUE){ - if(MA_mean == FALSE){ + if (MA_mean == FALSE) { data <- filter(data, study_id != "overall") } @@ -91,10 +76,10 @@ plot_forest <- function(data, intercept = TRUE, MA_mean = TRUE){ minor_breaks = seq(from = -4.5, to = 1.5, by = 0.5)) + NatParksPalettes::scale_color_natparks_d("Glacier") - if(intercept == TRUE){ + if (intercept == TRUE) { p <- p + geom_hline(yintercept = 0) } - if(MA_mean == TRUE){ + if (MA_mean == TRUE) { # p <- p + geom_hline(aes(yintercept = meta_analytic_mean), # data = data, # colour = "#01353D", @@ -106,7 +91,7 @@ plot_forest <- function(data, intercept = TRUE, MA_mean = TRUE){ plot_forest_2 <- function(data, intercept = TRUE, MA_mean = TRUE){ - if(MA_mean == FALSE){ + if (MA_mean == FALSE) { data <- filter(data, study_id != "overall") } @@ -138,10 +123,10 @@ plot_forest_2 <- function(data, intercept = TRUE, MA_mean = TRUE){ scale_y_continuous(breaks = scales::breaks_extended(10)) + NatParksPalettes::scale_color_natparks_d("Glacier") - if(intercept == TRUE){ + if (intercept == TRUE) { p <- p + geom_hline(yintercept = 0) } - if(MA_mean == TRUE){ + if (MA_mean == TRUE) { p <- p + geom_hline(aes(yintercept = plot_data %>% filter(type == "summary", estimate_type == "y25") %>% @@ -206,7 +191,7 @@ 
create_model_workflow <- function(outcome, fixed_effects, random_intercepts){ set_engine("lmer") workflow_formula <- workflow() %>% - add_variables(outcomes = outcome, + add_variables(outcomes = all_of(outcome), predictors = all_of(c(fixed_effects, random_intercepts))) %>% add_model(model, formula = model_formula) #%>% # add_case_weights(weight) @@ -288,14 +273,6 @@ possibly_check_convergence <- possibly(performance::check_convergence, possibly_check_singularity <- possibly(performance::check_singularity, otherwise = NA) -# Define glm model checknig fun (my pr to performance:: https://github.com/easystats/performance/pull/605) -check_convergence._glm <- function(x, ...){ - isTRUE(x$fit$converged) -} - -possibly_check_convergence_glm <- possibly(check_convergence._glm, - otherwise = NA) - # define plotting fun for walk plotting plot_continuous_rating <- function(plot_data){ plot_data %>% @@ -348,6 +325,20 @@ plot_model_means_RE <- function(data, variable, predictor_means) { poss_fit <- possibly(fit, otherwise = NA, quiet = FALSE) +create_model_formulas <- function(outcome, fixed_effects, random_intercepts){ + # https://community.rstudio.com/t/programmatically-generate-formulas-for-lmer/8575 + # ---- Define random effects constructor function ---- + randomify <- function(feats) { + paste0("(1|", feats, ")", collapse = " + ") + } + # ---- Construct formula ---- + randomify <- function(feats) paste0("(1|", feats, ")", collapse = " + ") + fixed <- paste0(fixed_effects, collapse = " + ") + random <- randomify(random_intercepts) + model_formula <- as.formula(paste(outcome, "~", fixed, "+", random)) + return(model_formula) +} + ``` ## Box-Cox transformation of response variable for model fitting @@ -358,93 +349,51 @@ Consequently, each dataset has its own unique value of $\lambda$, and therefore ```{r} #| label: fig-box-cox-transformations -#| fig-cap: "Box-Coxtransformed absolute deviation scores plotted against (untransformed) absolute deviation scores." 
-#| column: body-outset +#| fig-cap: "Box-Cox transformed absolute deviation scores plotted against (untransformed) absolute deviation scores." #| fig-height: 8 +#| fig-width: 10 #| message: false -back_transformed_predictions <- - ManyEcoEvo::ManyEcoEvo_yi %>% - dplyr::mutate(data = - purrr::map(data, - ~ dplyr::filter(.x, - stringr::str_detect(response_variable_type, "constructed", negate = TRUE)))) %>% - prepare_response_variables_yi(estimate_type = "yi", - param_table = ManyEcoEvo:::analysis_data_param_tables) %>% - generate_yi_subsets() - -raw_mod_data_logged <- - back_transformed_predictions %>% - filter(dataset == "eucalyptus") %>% - group_by(estimate_type) %>% - select(estimate_type, data) %>% - unnest(data) %>% - rename(study_id = id_col) %>% - hoist(params, param_mean = list("value", 1), param_sd = list("value", 2)) %>% - rowwise() %>% - mutate(exclusion_threshold = param_mean + 3*param_sd) %>% - filter(fit < exclusion_threshold) %>% - mutate(log_vals = map2(fit, se.fit, log_transform, 1000)) %>% - unnest(log_vals) %>% - select(study_id, - TeamIdentifier, - estimate_type, - starts_with("response_"), - -response_id_S2, - ends_with("_log")) %>% - group_by(estimate_type) %>% - nest() - -mod_data_logged <- raw_mod_data_logged %>% - mutate(MA_mod = - map(data, - ~fit_MA_mv(.x, mean_log, std.error_log, "yi"))) - -deviation_models_yi_euc <- - raw_mod_data_logged %>% - mutate(dataset = "eucalyptus", - exclusion_set = "complete") %>% - select(dataset, estimate_type, exclusion_set, data) %>% # rearrange cols - left_join({ManyEcoEvo_yi %>% - mutate(review_dat = map(data, select, id_col, review_data, mixed_model)) %>% - select(dataset, review_dat, diversity_data)}, by = "dataset") %>% - mutate(data = map(data, ~ rename(.x, - id_col = study_id, - Z_logged = mean_log, - VZ_logged = std.error_log)), #required by compute_MA_inputs() and meta_analyse_dataset() - data = map2(data, review_dat, left_join, by = "id_col"), - diversity_data = # this step filters diversity_data 
according to matches in data, is also applied in prepare_yi - map2(.x = diversity_data, - .y = data, - .f = ~ semi_join(.x, .y, by = "id_col") %>% distinct), - .keep = "unused") %>% #drops cols that aren't mutated - ManyEcoEvo::compute_MA_inputs() %>% - ManyEcoEvo::meta_analyse_datasets() +prep_math_label_estimate_type <- function(estimate_string){ + paste0(substring(estimate_string, 1, 1), + "[", substring(estimate_string, 2, 3), "]") +} + +filter_vars_main_analysis <- rlang::exprs(estimate_type == "Zr", + exclusion_set == "complete", + publishable_subset == "All", + expertise_subset == "All", + collinearity_subset == "All") transformation_plot_data <- - ManyEcoEvo_yi_results %>% - ungroup %>% - filter(exclusion_set == "complete", - dataset == "blue tit") %>% - bind_rows(deviation_models_yi_euc) %>% + ManyEcoEvo::ManyEcoEvo_yi_results %>% bind_rows(ManyEcoEvo_results %>% - filter(exclusion_set == "complete", - publishable_subset == "All", - collinearity_subset == "All", - expertise_subset == "All")) %>% + filter(!!!filter_vars_main_analysis)) %>% select(dataset, estimate_type, effects_analysis) %>% hoist(effects_analysis, "abs_deviation_score_estimate", "box_cox_abs_deviation_score_estimate") %>% hoist(effects_analysis, "lambda", .simplify = TRUE, .transform = ~unique(.x)) %>% select(-effects_analysis) %>% unnest(cols = c(abs_deviation_score_estimate, - box_cox_abs_deviation_score_estimate)) + box_cox_abs_deviation_score_estimate)) transformation_plot_data %>% + mutate(estimate_type = forcats::as_factor(estimate_type), + estimate_type = forcats::fct_relabel(estimate_type, prep_math_label_estimate_type), + dataset = case_match(dataset, + "eucalyptus" ~ "Eucalyptus", + .default = dataset), + dataset = dplyr::if_else(str_detect(dataset, "blue"), + latex2exp::TeX(dataset, output = "character"), + latex2exp::TeX(dataset, italic = TRUE, output = "character") ) + ) %>% ggplot(aes(y = abs_deviation_score_estimate, x = box_cox_abs_deviation_score_estimate)) + geom_point() 
+ - facet_grid(dataset~estimate_type, scales = "free") + + ggh4x::facet_grid2(c("dataset", "estimate_type"), + scales = "free", + independent = "all", + labeller = labeller(estimate_type = label_parsed, dataset = label_parsed)) + geom_label(aes(x = -Inf, y = Inf, label = latex2exp::TeX(paste("$\\lambda =$", round(lambda, digits = 4)), output = "character"), hjust = -0.2, vjust = 2), @@ -456,24 +405,20 @@ transformation_plot_data %>% ## Model Convergence and Singularity problems {#sec-convergence-singularity} -During model fitting, especially during fitting of models with random effects using `lme4` [@lme4], some models failed to converge while others were accompanied with console warnings of singular fit. -However, the convergence checks from `lme4` are known to be too strict (see `?performance::check_convergence()` documentation for a discussion of this issue), consequently we checked for model warnings of convergence failure using the `check_convergence()` function from the `performance` package [@performance]. -For all models we double-checked that they did not have singular fit by using `performance::check_singularity`. -Despite passing `performance::check_singularity()`, `parameters::parameters()` was unable to properly estimate SE and confidence intervals for the random effects of some models, which suggests singular fit. -For all models we also checked whether the SE of random effects could be calculated, and if not, marked these models as being singular. +During model fitting, especially during fitting of models with random effects using `lme4::` [@lme4], some models failed to converge while others were accompanied with console warnings of singular fit. 
+However, the convergence checks from `lme4::` are known to be overly strict (see `?performance::check_convergence()` documentation for a discussion of this issue), consequently we checked for model warnings of convergence failure using the `performance::check_convergence()` function from the `performance::` package [@performance]. +For all models we double-checked that they did not have singular fit by using `performance::check_singularity()`. +Despite passing singularity checks with the `performance::` package, `parameters::parameters()` was unable to properly estimate $\text{SE}$ and confidence intervals for the random effects of some models, which suggests singular fit. +For all models we also checked whether the $\text{SE}$ of random effects estimates could be calculated, and if not, marked these models as being singular. Analyses of singularity and convergence are presented throughout this document under the relevant section-heading for the analysis type and outcome, i.e. effect size ($Z_r$) or out-of-sample predictions ($y_i$). ## Deviation Scores as explained by Reviewer Ratings ### Effect Sizes $Z_r$ {#sec-Zr-deviation-ratings} -For models of deviation explained by categorical peer ratings, including random effects for both the effect ID and the reviewer ID resulted in models with singular fit, or that failed to converge, for both blue tit and *Eucalyptus* datasets (@tbl-explore-Zr-deviation-random-effects-structure). -For the *Eucalyptus* dataset, when a random effect was included for Reviewer ID only, the model passed the check with `performance::check_singularity()`, however, the SD and CI could not be estimated by `parameters::model_parameters()` with a warning stating this was likely due to singular fit. -When fitting models of deviation explained by categorical peer ratings, we consequently included a random effect for Reviewer ID only (See @tbl-deviation-rating-estimates). 
+Models of deviation explained by categorical peer ratings all had singular fit or failed to converge for both blue tit and *Eucalyptus* datasets when random effects were included for both the effect ID and the reviewer ID (@tbl-explore-Zr-deviation-random-effects-structure). For the *Eucalyptus* dataset, when a random effect was included for effect ID only, the model failed to converge. The same was true for the blue tit dataset. As for the effect-size analysis, we included a random-effect for Reviewer ID only when fitting models of deviation explained by categorical peer ratings (See @tbl-deviation-rating-estimates). -For models of deviation explained by continuous peer-review ratings, when including both random effects for effect ID and Reviewer ID model fits were singular for both datasets (@tbl-explore-Zr-deviation-random-effects-structure). -For the *Eucalyptus* dataset when including a random effect only for Reviewer ID and dropping the random effect for effect ID, this model passed the `performance::check_singularity()` check, however, however, the SD and CI could not be estimated by `parameters::model_parameters()` with a warning stating this was likely due to singular fit. -Consequently, for both blue tit and *Euclayptus* datasets, we fitted and analysed models of deviation explained by continuous peer review ratings with a random effect for Effect ID only (See @tbl-deviation-rating-estimates). +For models of deviation explained by continuous peer-review ratings, when including both random effects for effect ID and Reviewer ID model fits were singular for both datasets (@tbl-explore-Zr-deviation-random-effects-structure). The models passed the `performance::check_singularity()` check, however, the $\text{SD}$ and CI could not be estimated by `parameters::model_parameters()` with a warning stating this was likely due to singular fit.
For models with a random effect for effect ID, the same occurred for the blue tit dataset, whereas for the *Eucalyptus* dataset, the model did not converge at all. Consequently, for both blue tit and *Euclayptus* datasets, we fitted and analysed models of deviation explained by continuous peer review ratings with a random effect for Reviewer ID only (See @tbl-deviation-rating-estimates). ```{r} #| label: tbl-explore-Zr-deviation-random-effects-structure @@ -482,8 +427,11 @@ Consequently, for both blue tit and *Euclayptus* datasets, we fitted and analyse library(multilevelmod) +poss_extract_fit_engine <- purrr::possibly(extract_fit_engine, otherwise = NA) +poss_parameters <- purrr::possibly(parameters::parameters, otherwise = NA) + model <- linear_reg() %>% - set_engine("lmer") + set_engine("lmer", control = lmerControl(optimizer = "nloptwrap")) base_wf <- workflow() %>% add_model(model) @@ -526,42 +474,25 @@ model_vars <- ) # ----- Run all models for all combinations of dataset, exclusion_set, and publishable_subset ---- -# And Extract - +# And Extract +set.seed(1234) all_model_fits <- model_vars %>% cross_join(., {ManyEcoEvo::ManyEcoEvo_results %>% - select(dataset, - exclusion_set, - estimate_type, - publishable_subset) %>% - filter(expertise_subset == "All", - collinearity_subset == "All") %>% - ungroup %>% - select(-expertise_subset, -collinearity_subset)}) %>% - left_join(., {ManyEcoEvo::ManyEcoEvo_results %>% - select(dataset, - exclusion_set, - estimate_type, - publishable_subset, - effects_analysis) %>% - filter(expertise_subset == "All", - collinearity_subset == "All") %>% - ungroup %>% - select(-expertise_subset, -collinearity_subset)}, - by = join_by(dataset, - exclusion_set, - estimate_type, - publishable_subset)) %>% - ungroup %>% + select(estimate_type, ends_with("set"), effects_analysis) %>% + dplyr::filter(expertise_subset == "All", + collinearity_subset == "All") %>% + select(-c(expertise_subset, collinearity_subset))}) %>% + ungroup() %>% 
filter(publishable_subset == "All", exclusion_set == "complete") %>% + select(-c(exclusion_set, publishable_subset)) %>% mutate(effects_analysis = map(effects_analysis, ~ .x %>% unnest(review_data) %>% - select(study_id, + select(any_of(c("id_col", "study_id")), starts_with("box_cox_abs_dev"), RateAnalysis, PublishableAsIs, @@ -583,10 +514,10 @@ all_model_fits <- unnest_wider(random_intercepts, names_sep = "_") %>% select(-outcome, -model_workflows, - -fitted_mod_workflow, + -fitted_mod_workflow, -effects_analysis, estimate_type) %>% - replace_na(list(convergence = FALSE, singularity = TRUE)) + replace_na(list(convergence = FALSE)) # If singularity == FALSE and convergence == TRUE, but the model appears here, then that's because # the SD and CI's couldn't be estimated by parameters:: @@ -596,7 +527,19 @@ Zr_singularity_convergence <- left_join({all_model_fits %>% unnest(params) %>% filter(Effects == "random") %>% - filter(is.infinite(CI_high) | is.na(SE)) %>% + filter(if_any(contains("SE"), list(is.infinite, is.na))) %>% + distinct(fixed_effects, + random_intercepts_1, + random_intercepts_2, + dataset, + estimate_type, + convergence, + singularity) %>% + mutate(SE_calc = FALSE)}) %>% + left_join({all_model_fits %>% + unnest(params) %>% + filter(Effects == "random") %>% + filter(if_any(contains("CI"), list(is.infinite, is.na))) %>% distinct(fixed_effects, random_intercepts_1, random_intercepts_2, @@ -604,13 +547,16 @@ Zr_singularity_convergence <- estimate_type, convergence, singularity) %>% - mutate(SD_calc = FALSE)}) %>% - mutate(SD_calc = ifelse(is.na(SD_calc), TRUE, SD_calc)) + mutate(CI_calc = FALSE)}) %>% + rowwise() %>% + mutate(across(ends_with("_calc"), + ~ replace_na(.x, TRUE))) %>% + mutate(across(c(SE_calc, CI_calc, singularity), ~ ifelse(is_false(convergence), NA, .x))) # ----- new code showing ALL model fits not just bad fits Zr_singularity_convergence %>% - select(-fitted_model, -params, -exclusion_set, -publishable_subset, -estimate_type) %>% + 
select(-fitted_model, -params, -estimate_type) %>% arrange(dataset, fixed_effects, random_intercepts_1, @@ -620,7 +566,9 @@ Zr_singularity_convergence %>% ~ str_replace_all(.x, "_", " ") %>% Hmisc::capitalize() %>% str_replace("id", "ID")), - dataset = case_when(dataset == "eucalyptus" ~ Hmisc::capitalize(dataset), TRUE ~ dataset)) %>% + dataset = + case_when(dataset == "eucalyptus" ~ Hmisc::capitalize(dataset), + TRUE ~ dataset)) %>% group_by(dataset) %>% gt::gt() %>% gt::text_transform( @@ -640,17 +588,20 @@ Zr_singularity_convergence %>% locations = list( cells_body(columns = "singularity", rows = singularity == TRUE), cells_body(columns = "convergence", rows = convergence == FALSE), #TODO why didn't work here?? - cells_body(columns = "SD_calc", rows = SD_calc == FALSE) + cells_body(columns = "SE_calc", rows = SE_calc == FALSE), + cells_body(columns = "CI_calc", rows = CI_calc == FALSE) ) ) %>% - gt::text_transform(fn = function(x) ifelse(x == TRUE, "yes", "no" ), - locations = cells_body(columns = c("singularity", "convergence", "SD_calc"))) %>% + gt::text_transform(fn = function(x) ifelse(x == TRUE, "yes", + ifelse(x == FALSE, "no", x)), + locations = cells_body(columns = c("singularity", "convergence", "SE_calc", "CI_calc"))) %>% gt::opt_stylize(style = 6, color = "gray") %>% gt::cols_label(dataset = "Dataset", fixed_effects = "Fixed Effect", singularity = "Singular Fit?", convergence = "Model converged?", - SD_calc = "Can random effect SD be calculated?") %>% + SE_calc = gt::md("Can random effects $\\text{SE}$ be calculated?"), + CI_calc = "Can random effect 95% CI be calculated?") %>% gt::tab_spanner(label = "Random Effects", columns = gt::starts_with("random")) %>% gt::sub_missing() %>% @@ -660,191 +611,276 @@ Zr_singularity_convergence %>% cells_body(rows = str_detect(dataset, "Eucalyptus"), columns = dataset), style = cell_text(style = "italic")) %>% - gt::text_transform(fn = function(x) str_replace(x, "publishable_as_is", "Categorical Peer Rating") %>% 
str_replace(., "rate_analysis", "Continuous Peer Rating"), - locations = cells_body(columns = c("fixed_effects"))) - + gt::text_transform(fn = function(x) str_replace(x, "publishable_as_is", "Categorical Peer Rating") %>% + str_replace(., "rate_analysis", "Continuous Peer Rating"), + locations = cells_body(columns = c("fixed_effects"))) %>% + gt::tab_style(style = cell_text(style = "italic", transform = "capitalize"), + locations = cells_row_groups(groups = "Eucalyptus")) ``` + +### Out of sample predictions $y_i$ + +As for effect-size estimates $Z_r$, we encountered convergence and singularity problems when fitting models of deviation in out-of-sample predictions $y_i$ explained by categorical peer ratings for both datasets (@tbl-explore-yi-deviation-random-effects-structure). For all continuous models across both datasets, we encountered convergence and singularity problems when including random effects for both effect ID and Reviewer ID, as well as when including random effects for the effect ID only. In the latter case, for many prediction scenarios, across both blue tit and *Eucalyptus* datasets, estimated random effect coefficient CI's and $\text{SE}$ could not be estimated. For models of deviation in out-of-sample predictions explained by continuous peer review ratings, when a random effect was included for effect ID only, CI's returned values of 0 for both bounds and model means estimated with `modelbased::estimate_means()` could not be reliably estimated and were equal for every peer-rating category (@tbl-explore-yi-deviation-random-effects-structure). Consequently, we fitted models of deviation in out-of-sample predictions explained by continuous peer ratings with a random effect for Reviewer ID only (@tbl-yi-deviation-ratings-convergence-singularity). These model structures matched converging and non-singular model structures for effect-size estimates $Z_r$ (@tbl-explore-Zr-deviation-random-effects-structure). 
+ ```{r} -#| label: calculate-summary-stats-yi -#| warning: false -#| message: false +#| label: tbl-explore-yi-deviation-random-effects-structure +#| tbl-cap: 'Singularity and convergence checking outcomes for models of deviation in out-of-sample predictions $y_r$ explained by peer-review ratings for different random effect structures. Problematic checking outcomes are highlighted in red.' +#| cache: false -plot_data_logged <- mod_data_logged %>% - mutate(tidy_mod = map(.x = MA_mod, - ~broom::tidy(.x, - conf.int = TRUE, - include_studies = TRUE) %>% - rename(study_id = term))) %>% - select(tidy_mod) %>% - unnest(cols = c(tidy_mod)) - -MA_yi_summary_stats <- # ALL ON logged RESPONSE SCALE for EUC, standardized response values for BT - plot_data_logged %>% - mutate(response_scale = map2(estimate, std.error, log_back, 100)) %>% - select(estimate_type, study_id, type, response_scale) %>% - unnest(response_scale) %>% - rename(estimate = mean_origin, conf.low = lower, conf.high = upper) %>% - nest(tidy_mod = -estimate_type) %>% - mutate(dataset = "eucalyptus") %>% - bind_rows({ - ManyEcoEvo::ManyEcoEvo_yi_results %>% - ungroup() %>% - filter(exclusion_set == "complete", dataset == "blue tit") %>% - select(dataset, estimate_type, MA_mod, effects_analysis, -exclusion_set) %>% - group_by(estimate_type, dataset) %>% - transmute(tidy_mod = map(.x = MA_mod, - ~broom::tidy(.x, - conf.int = TRUE, - include_studies = TRUE) %>% - rename(study_id = term))) - }) %>% - mutate(MA_mean = map(tidy_mod, filter, type == "summary")) %>% - hoist(MA_mean, - mean = "estimate", - MA_conf.low = "conf.low", - MA_conf.high = "conf.high") %>% - mutate(max_min_est = map(tidy_mod, - ~ filter(.x, type == "study") %>% - summarise(max_est = max(estimate), - min_est = min(estimate)))) %>% - mutate(max_min_CI = map(tidy_mod, - ~ filter(.x, type == "study") %>% - summarise(max_upper_CI = max(conf.high), - min_lower_CI = min(conf.low)))) %>% - unnest_wider(col = c(max_min_est, max_min_CI)) %>% - ungroup 
%>% - rows_update({plot_data_logged %>% #hells yes to this gem of a function! - mutate(dataset = "eucalyptus") %>% - filter(type != "summary") %>% - nest(tidy_mod = c(-estimate_type, -dataset))}, - by = c("dataset", "estimate_type")) %>% - mutate(no_effect = - map_int(tidy_mod, - ~ filter(.x, - estimate >0 & conf.low <= 0 | estimate <0 & conf.high >= 0, - type == "study") %>% - nrow() ), - pos_sign = - map_int(tidy_mod, - ~ filter(.x, estimate >0, conf.low > 0, - type == "study") %>% - nrow()), - neg_sign = - map_int(tidy_mod, - ~ filter(.x, estimate < 0, conf.high < 0, - type == "study") %>% - nrow()), - total_effects = - map_int(tidy_mod, - ~ filter(.x, - type == "study") %>% - nrow() - )) %>% - select(-tidy_mod, -MA_mean) %>% - rename(MA_mean = mean) -``` +all_model_fits_yi <- + model_vars %>% + cross_join(., + {ManyEcoEvo::ManyEcoEvo_yi_results %>% + select(estimate_type, ends_with("set"), effects_analysis)}) %>% + ungroup() %>% + mutate(effects_analysis = + map(effects_analysis, + ~ .x %>% + select(any_of(c("id_col", "study_id")), + starts_with("box_cox_abs_dev"), + RateAnalysis, + PublishableAsIs, + ReviewerId, + box_cox_var) %>% + janitor::clean_names() %>% + mutate_if(is.character, factor) + ), + model_workflows = pmap(.l = list(outcome, + fixed_effects, + random_intercepts), + .f = create_model_workflow), + fitted_mod_workflow = map2(model_workflows, effects_analysis, poss_fit), #NOT MEANT TO BE TEST DAT + fitted_model = map(fitted_mod_workflow, poss_extract_fit_engine), + convergence = map(fitted_model, possibly_check_convergence), + singularity = map(fitted_model, possibly_check_singularity), + params = map(fitted_model, poss_parameters)) %>% + mutate( + across(where(is.list), + .fns = ~ coalesce(.x, list(NA))) + ) %>% + mutate(convergence = list_c(convergence), + singularity = list_c(singularity)) %>% + unnest_wider(random_intercepts, names_sep = "_") %>% + select(-outcome, + -model_workflows, + -fitted_mod_workflow, + -effects_analysis, + 
estimate_type) -### Out of sample predictions $y_i$ +yi_singularity_convergence_all <- + all_model_fits_yi %>% + left_join({all_model_fits_yi %>% + unnest(params) %>% + filter(Effects == "random") %>% + filter(if_any(contains("SE"), list(is.infinite, is.na))) %>% + distinct(fixed_effects, + random_intercepts_1, + random_intercepts_2, + dataset, + estimate_type, + convergence, + singularity) %>% + mutate(SE_calc = FALSE)}) %>% + left_join({all_model_fits %>% + unnest(params) %>% + filter(Effects == "random") %>% + filter(if_any(contains("CI"), list(is.infinite, is.na))) %>% + distinct(fixed_effects, + random_intercepts_1, + random_intercepts_2, + dataset, + estimate_type, + convergence, + singularity) %>% + mutate(CI_calc = FALSE)}) %>% + rowwise() %>% + mutate(across(ends_with("_calc"), + ~ replace_na(.x, TRUE))) %>% + mutate(across(c(SE_calc, CI_calc, singularity), ~ ifelse(is_false(convergence), NA, .x))) + +yi_singularity_convergence_all %>% + select(-fitted_model, -params) %>% + arrange(dataset, + estimate_type, + fixed_effects, + random_intercepts_1, + random_intercepts_2 + ) %>% + mutate(across(starts_with("random"), + ~ str_replace_all(.x, "_", " ") %>% + Hmisc::capitalize() %>% + str_replace("id", "ID")), + dataset = + case_when(dataset == "eucalyptus" ~ Hmisc::capitalize(dataset), + TRUE ~ dataset)) %>% + mutate(fixed_effects = forcats::fct_recode(fixed_effects, + "Categorical Peer Rating" = "publishable_as_is", + "Continuous Peer Rating" = "rate_analysis")) %>% + group_by(fixed_effects) %>% + arrange(fixed_effects, dataset, pick(starts_with("random"))) %>% + relocate(estimate_type,.after = dataset) %>% + gt::gt(rowname_col = "dataset") %>% + gt::text_transform( + locations = cells_body( + columns = fixed_effects, + rows = random_intercepts_1 != "Reviewer ID" + ), + fn = function(x){ + paste0("") + } + ) %>% + tab_style( + style = list( + cell_fill(color = scales::alpha("red", 0.6)), + cell_text(color = "white", weight = "bold") + ), + locations = list( + 
cells_body(columns = "singularity", rows = singularity == TRUE), + cells_body(columns = "convergence", rows = convergence == FALSE), #TODO why didn't work here?? + cells_body(columns = "SE_calc", rows = SE_calc == FALSE), + cells_body(columns = "CI_calc", rows = CI_calc == FALSE) + ) + ) %>% + gt::text_transform(fn = function(x) ifelse(x == TRUE, "yes", + ifelse(x == FALSE, "no", x)), + locations = cells_body(columns = c("singularity", "convergence", "SE_calc", "CI_calc"))) %>% + gt::opt_stylize(style = 6, color = "gray") %>% + gt::cols_label(dataset = "Dataset", + estimate_type = "Prediction Scenario", + fixed_effects = "Fixed Effect", + singularity = "Singular Fit?", + convergence = "Model converged?", + SE_calc = gt::md("Can random effects $\\text{SE}$ be calculated?"), + CI_calc = "Can random effect 95% CI be calculated?") %>% + gt::tab_spanner(label = "Random Effects", + columns = gt::starts_with("random")) %>% + gt::sub_missing() %>% + gt::cols_label_with(columns = gt::starts_with("random"), + fn = function(x) paste0("")) %>% + gt::tab_style(locations = + cells_body(rows = str_detect(dataset, "Eucalyptus"), + columns = dataset), + style = cell_text(style = "italic")) %>% + gt::text_transform(fn = function(x) str_replace(x, "publishable_as_is", "Categorical Peer Rating") %>% + str_replace(., "rate_analysis", "Continuous Peer Rating"), + locations = cells_body(columns = c("fixed_effects"))) %>% + gt::text_transform( + locations = cells_stub( + rows = estimate_type != "y25" + ), + fn = function(x){ + paste0("") + } + ) %>% + gt::tab_style(locations = cells_stub(rows = str_detect(dataset, "Eucalyptus")), + style = cell_text(style = "italic")) %>% + gt_fmt_yi(columns = "estimate_type") +``` ```{r} #| label: yi-Euc #| message: false #| warning: false #| error: true -#| echo: false #| results: 'hide' - -euc_yi_results <- - ManyEcoEvo::make_viz(deviation_models_yi_euc) +yi_fitted_mods <- + ManyEcoEvo::ManyEcoEvo_yi_viz %>% + filter(model_name %in% 
c("box_cox_rating_cat", + "box_cox_rating_cont", + "sorensen_glm", + "uni_mixed_effects")) %>% + select(-ends_with("_plot"), -MA_fit_stats, -contains("mod_")) %>% + rowwise() %>% + mutate(singularity = possibly_check_singularity(model), + convergence = list(possibly_check_convergence(model))) %>% + ungroup() %>% mutate( + across(where(is.list), + .fns = ~ coalesce(.x, list(NA))) + ) %>% + mutate(convergence = list_c(convergence), + singularity = case_when(is.na(convergence) ~ NA, + TRUE ~ singularity)) yi_convergence_singularity <- - ManyEcoEvo_yi_viz %>% - filter(exclusion_set == "complete", - dataset == "blue tit", - model_name %in% c("box_cox_rating_cat", "box_cox_rating_cont")) %>% - bind_rows({euc_yi_results %>% - filter(model_name %in% - c("box_cox_rating_cat", "box_cox_rating_cont"))}) %>% - mutate(singularity = map_lgl(model, possibly_check_singularity), - convergence = map_lgl(model, possibly_check_convergence), - params = map(model, parameters::parameters), - convergence = ifelse(is.na(convergence), FALSE, convergence), - singularity = ifelse(is.na(singularity), FALSE, singularity)) %>% - select(dataset, estimate_type, model_name, singularity, convergence, params) %>% - mutate(model_name = forcats::as_factor(model_name), + yi_fitted_mods %>% + left_join({ # Check if SE and CI can be calculated + yi_fitted_mods %>% + unnest(model_params) %>% + filter(Effects == "random") %>% + filter(if_any(contains("SE"), list(is.infinite, is.na))) %>% + distinct(dataset, estimate_type, model_name) %>% + mutate(SE_calc = FALSE) + }, by = join_by(dataset, estimate_type, model_name)) %>% + left_join({ + yi_fitted_mods %>% + unnest(model_params) %>% + filter(Effects == "random") %>% + filter(if_any(contains("CI_"), list(is.infinite, is.na))) %>% + distinct(dataset, estimate_type, model_name) %>% + mutate(CI_calc = FALSE) + }, by = join_by(dataset, estimate_type, model_name)) %>% + rowwise() %>% + mutate(across(ends_with("_calc"), + ~ replace_na(.x, TRUE)), + 
across(c(SE_calc, CI_calc, singularity), ~ ifelse(is_false(convergence) | is_na(convergence), NA, .x)), + model_name = forcats::as_factor(model_name), model_name = forcats::fct_relevel(model_name, c("box_cox_rating_cat", - "box_cox_rating_cont")), - model_name = forcats::fct_recode(model_name, - `Deviation explained by categorical ratings` = "box_cox_rating_cat", - `Deviation explained by continuous ratings` = "box_cox_rating_cont"), + "box_cox_rating_cont", + "sorensen_glm", + "uni_mixed_effects")), + model_name = + forcats::fct_recode( + model_name, + `Deviation explained by categorical ratings` = "box_cox_rating_cat", + `Deviation explained by continuous ratings` = "box_cox_rating_cont", + `Deviation explained by Sorensen's index` = "sorensen_glm", + `Deviation explained by inclusion of random effects` = + "uni_mixed_effects"), dataset = case_when(str_detect(dataset, "eucalyptus") ~ "Eucalyptus", TRUE ~ dataset)) %>% - hoist(params, SD_calc = "SE",.remove = FALSE) %>% - mutate(SD_calc = map_lgl(SD_calc, ~ is.na(.x) %>% any(.) 
%>% isFALSE(.))) + ungroup() %>% + select(-model) yi_singularity_convergence_sorensen_mixed_mod <- - ManyEcoEvo_yi_viz %>% - bind_rows(euc_yi_results) %>% - filter(exclusion_set == "complete", - dataset == "blue tit", - model_name %in% c("sorensen_glm")) %>% - bind_rows({euc_yi_results %>% - filter(model_name %in% c("sorensen_glm", - "uni_mixed_effects"))}) %>% - mutate(singularity = - map_lgl(model, possibly_check_singularity), - convergence = - map_lgl(model, possibly_check_convergence_glm), - params = map(model, parameters::parameters), - dataset = - case_when(str_detect(dataset, "eucalyptus") ~ "Eucalyptus", - TRUE ~ dataset), - model_name = - forcats::as_factor(model_name), - model_name = - forcats::fct_relevel(model_name, - c("sorensen_glm","uni_mixed_effects")), - model_name = forcats::fct_recode(model_name, - `Deviation explained by Sorensen's index` = "sorensen_glm", - `Deviation explained by inclusion of random effects` = "uni_mixed_effects")) %>% - select(dataset, - estimate_type, - model_name, - singularity, - convergence, - params) %>% - group_by(model_name) + yi_convergence_singularity %>% + filter(str_detect(model_name, "Sorensen") | str_detect(model_name, "random")) ``` We fitted the same deviation models on the out-of-sample-predictions dataset that we fitted for the effect-size dataset. -However, while all models of deviation explained by categorical peer-ratings converged, the following datasets and estimate types suffered from singular fit: `r yi_convergence_singularity %>% filter(singularity == TRUE, str_detect(model_name, "categorical")) %>% unite("singular_models", dataset, estimate_type, sep = " - ") %>% pull(singular_models) %>% paste0(collapse = ", ") %>% str_replace("y25", "$y_{25}$") %>% str_replace("y50", "$y_{50}$") %>% str_replace("y75", "$y_{75}$") %>% str_replace("eucalyptus", "*Eucalyptus*")` (@tbl-yi-deviation-ratings-convergence-singularity). 
-Models of deviation explained by *continuous* ratings all converged, however models for the `r yi_convergence_singularity %>% filter(convergence == FALSE, str_detect(model_name, "continuous")) %>% unite("non_converged", dataset, estimate_type, sep = " - ") %>% pull(non_converged) %>% paste0(collapse = ", ") %>% str_replace("y25", "$y_{25}$") %>% str_replace("y50", "$y_{50}$") %>% str_replace("y75", "$y_{75}$") %>% str_replace("eucalyptus", "*Eucalyptus*")` out-of-sample predictions model fit was singular. -Similarly to the effect-size ($Z_r$) dataset, SD and CI could not be estimated for random effects in some models (@tbl-yi-deviation-ratings-convergence-singularity), consequently we interpreted this to mean the models had singular fit (See @sec-Zr-deviation-ratings). +However, while all models of deviation explained by categorical peer-ratings converged, the following datasets and prediction scenarios suffered from singular fit: `r yi_convergence_singularity %>% filter(singularity == TRUE, str_detect(model_name, "categorical")) %>% unite("singular_models", dataset, estimate_type, sep = " - ") %>% pull(singular_models) %>% paste0(collapse = ", ") %>% str_replace("y25", "$y_{25}$") %>% str_replace("y50", "$y_{50}$") %>% str_replace("y75", "$y_{75}$") %>% str_replace("Eucalyptus", "*Eucalyptus*")` (@tbl-yi-deviation-ratings-convergence-singularity). +Models of deviation explained by *continuous* ratings all converged, however models for the `r yi_convergence_singularity %>% filter(convergence == FALSE, str_detect(model_name, "continuous")) %>% unite("non_converged", dataset, estimate_type, sep = " - ") %>% pull(non_converged) %>% paste0(collapse = ", ") %>% str_replace("y25", "$y_{25}$") %>% str_replace("y50", "$y_{50}$") %>% str_replace("y75", "$y_{75}$") %>% str_replace("Eucalyptus", "*Eucalyptus*")` out-of-sample predictions model fit was singular. 
+Similarly to the effect-size ($Z_r$) dataset, $\text{SD}$ and CI could not be estimated for random effects in some models (@tbl-yi-deviation-ratings-convergence-singularity), consequently we interpreted this to mean the models had singular fit (See @sec-Zr-deviation-ratings). Results of all deviation models are therefore presented only for models with non-singular fit, and that converged (@tbl-yi-deviation-ratings-convergence-singularity). ```{r} #| label: tbl-yi-deviation-ratings-convergence-singularity #| tbl-cap: "Singularity and convergence checking for models of deviation in out-of-sample-predictions $y_i$ explained by peer-ratings." -yi_convergence_singularity %>% - select(-params) %>% - group_by(model_name) %>% + +yi_convergence_singularity %>% + filter(stringr::str_detect(model_name, "ratings")) %>% + select(-model_params) %>% + group_by(model_name) %>% gt::gt(rowname_col = "dataset") %>% - gt::tab_style(locations = cells_body(rows = str_detect(dataset, "Eucalyptus"), - columns = dataset), + gt::tab_style(locations = + cells_body(rows = str_detect(dataset, "Eucalyptus"), + columns = dataset), style = cell_text(style = "italic")) %>% gt::cols_label(dataset = "Dataset", - estimate_type = "Estimate Type", + estimate_type = "Prediction Scenario", singularity = "Singular Fit?", convergence = "Model converged?", - SD_calc = "Can random effect SE be calculated?") %>% + SE_calc = gt::md("Can random effects $\\text{SE}$ be calculated?"), + CI_calc = "Can random effect CI be calculated?") %>% gt::opt_stylize(style = 6, color = "gray") %>% - gt::text_transform(fn = function(x) ifelse(x == TRUE, "yes", "no" ), + gt::text_transform(fn = function(x) ifelse(x == TRUE, "yes", + ifelse(x == FALSE, "no", x)), locations = cells_body(columns = c("singularity", "convergence", - "SD_calc") + "SE_calc", + "CI_calc") )) %>% gt::text_transform( locations = cells_stub( @@ -865,39 +901,40 @@ yi_convergence_singularity %>% locations = list( cells_body(columns = "singularity", rows = 
singularity == TRUE), cells_body(columns = "convergence", rows = convergence == FALSE), - cells_body(columns = "SD_calc", rows = SD_calc == FALSE) + cells_body(columns = "SE_calc", rows = SE_calc == FALSE), + cells_body(columns = "CI_calc", rows = CI_calc == FALSE) ) - ) + ) %>% + gt::sub_missing() ``` Group means and $95\%$ confidence intervals for different categories of peer-review rating are all overlapping (@fig-yi-deviation-cat-rating). -The fixed effect of peer review rating also explains virtually no variability in $y_i$ deviation score (@tbl-yi-deviation-ratings-convergence-singularity). +The fixed effect of peer review rating also explains virtually no variability in deviation scores for out-of-sample predictions $y_i$ (@tbl-yi-deviation-ratings-convergence-singularity). ```{r} -#| label: fig-yi-deviation-cat-rating -#| fig-cap: "Violin plot of Box-Cox transformed deviation from meta-analytic mean as a function of categorical peer-review rating. Grey points for each rating group denote model-estimated marginal mean deviation, and error bars denote 95% CI of the estimate. **A** blue tit dataset, $y_{50}$ **B** blue tit dataset, $y_{75}$ **C** *Eucalyptus* dataset, $y_{50}$." +#| label: compute-yi-deviation-cat-rating #| message: false -#| fig-height: 12 -#| fig-width: 7 -#| column: page-inset-right +#| warning: false -# Omit all singular models yi_violin_cat_plot_data <- - ManyEcoEvo_yi_viz %>% - filter(exclusion_set == "complete", #TODO NEED TO PULL OUT LAMBDA! 
- dataset == "blue tit", - model_name %in% c("box_cox_rating_cat")) %>% - bind_rows({euc_yi_results %>% - filter(model_name %in% c("box_cox_rating_cat"))}) %>% + ManyEcoEvo::ManyEcoEvo_yi_viz %>% + filter(model_name %in% "box_cox_rating_cat") %>% + left_join(., + {ManyEcoEvo::ManyEcoEvo_yi_results %>% + select(dataset, estimate_type, effects_analysis) %>% + hoist(effects_analysis, "lambda", .transform = unique) %>% + select(-effects_analysis)}, + by = join_by(dataset, estimate_type)) %>% mutate( dataset = case_when(str_detect(dataset, "eucalyptus") ~ "Eucalyptus", TRUE ~ dataset)) %>% - semi_join({yi_convergence_singularity %>% # filter out singular models #TODO rm mods with NA/Inf/0 random effect SE - filter(singularity == FALSE, - str_detect(model_name, "categorical")) }, - by = join_by("dataset", "estimate_type")) %>% - select(dataset:model, -exclusion_set, -model_name) %>% + semi_join({ + yi_convergence_singularity %>% + filter( str_detect(model_name, "categorical"), + !singularity, convergence, SE_calc, CI_calc) + }, by = join_by("dataset", "estimate_type")) %>% + select(dataset, estimate_type, model_name, model) %>% mutate(predictor_means = - map(model, modelbased::estimate_means), + map(model, modelbased::estimate_means, backend = "marginaleffects" ), model_data = map(model, ~pluck(.x, "frame") %>% drop_na() %>% as_tibble()), @@ -916,7 +953,8 @@ yi_violin_cat_plot_data <- str_replace(PublishableAsIs, "publishable with ", "") %>% str_replace("deeply flawed and ", "") %>% - capwords()))) + capwords()))) %>% + select(-model) yi_violin_cat_plots <- yi_violin_cat_plot_data %>% pmap(.l = list(.$model_data, .$predictor_means, .$plot_name), @@ -928,10 +966,36 @@ yi_violin_cat_plots <- yi_violin_cat_plot_data %>% "Major Revision", "Minor Revision", "Publishable As Is"), - ..3)) + ..3)) %>% + purrr::set_names({yi_violin_cat_plot_data %>% + pull(plot_name) %>% + stringr::str_split("_violin_cat", 2) %>% + map_chr(pluck, 1) }) + + +subfigcaps_yi_cat <- 
 yi_violin_cat_plot_data %>% + mutate(dataset = + case_when(dataset == "Eucalyptus" ~ paste0("*", dataset, "*"), + TRUE ~ Hmisc::capitalize(dataset))) %>% + unite(plot_name, dataset, estimate_type, sep = ", ") %>% + pull(plot_name) +fig_cap_yi_deviation_cat_rating <- + paste0("Violin plot of Box-Cox transformed deviation from the meta-analytic mean for $y_i$ estimates as a function of categorical peer-review ratings. Note that higher (less negative) values of the deviation score result from greater deviation from the meta-analytic mean. Grey points for each rating group denote model-estimated marginal mean deviation, and error bars denote 95% CI of the estimate. ", subfigcaps_yi_cat %>% + paste0(paste0(paste0("**", LETTERS[1:length(subfigcaps_yi_cat)], "**", sep = ""), sep = ": "), ., collapse = ", "), ".") +``` + + +```{r} +#| label: fig-yi-deviation-cat-rating +#| fig-cap: !expr fig_cap_yi_deviation_cat_rating +#| message: false +#| fig-height: 10 +#| fig-width: 10 +#| column: body-outset library(patchwork) -wrap_plots(yi_violin_cat_plots, ncol = 1, tag_levels = 'A', guides = 'collect') +patchwork::wrap_plots(yi_violin_cat_plots, ncol = 2, nrow = 2, guides = 'collect') + + patchwork::plot_annotation(tag_levels = 'A') ``` There was a lack of any clear relationships between quantitative review scores and $y_i$ deviation scores (@tbl-yi-deviation-parameter-estimates). 
@@ -942,23 +1006,20 @@ Because almost no variability in $y_i$ deviation score was explained by reviewer ```{r} #| label: calc-yi-deviation-cont-rating -# Omit all singular models + yi_cont_plot_data <- - ManyEcoEvo_yi_viz %>% - filter(exclusion_set == "complete", - dataset == "blue tit", - model_name %in% c("box_cox_rating_cont")) %>% - bind_rows({euc_yi_results %>% - filter(model_name %in% c("box_cox_rating_cont"))}) %>% - mutate( dataset = case_when(str_detect(dataset, - "eucalyptus") ~ - "Eucalyptus", - TRUE ~ dataset)) %>% + ManyEcoEvo::ManyEcoEvo_yi_viz %>% + filter(model_name %in% c("box_cox_rating_cont")) %>% + mutate(dataset = case_match(dataset, "eucalyptus" ~ "Eucalyptus",.default = dataset)) %>% semi_join({yi_convergence_singularity %>% - filter(singularity == FALSE, SD_calc == TRUE, - str_detect(model_name, "cont")) }, + filter( str_detect(model_name, "cont"), # Omit all in-estimable models + !singularity, + convergence, + #SE_calc, + #CI_calc + )}, by = join_by("dataset", "estimate_type")) %>% - select(dataset:model, -exclusion_set, -model_name) %>% + select(dataset, estimate_type, model_name, model) %>% mutate(plot_data = map(model, pluck, "frame")) subfigcaps <- yi_cont_plot_data %>% @@ -969,36 +1030,37 @@ subfigcaps <- yi_cont_plot_data %>% pull(plot_name) fig_cap_yi_deviation_cont_rating <- - paste0("Scatterplots explaining Box-Cox transformed deviation from the meta-analytic mean for $y_i$ estimates as a function of continuous ratings. Note that higher (less negative) values of the deviation score result from greater deviation from the meta-analytic mean.", subfigcaps %>% - paste0(paste0(paste0("**", LETTERS[1:4], "**", sep = ""), sep = ": "), ., collapse = ", "), ".") + paste0("Scatterplots explaining Box-Cox transformed deviation from the meta-analytic mean for $y_i$ estimates as a function of continuous ratings. Note that higher (less negative) values of the deviation score result from greater deviation from the meta-analytic mean. 
", subfigcaps %>% + paste0(paste0(paste0("**", LETTERS[1:length(subfigcaps)], "**", sep = ""), sep = ": "), ., collapse = ", "), ".") ``` ```{r} #| label: fig-yi-deviation-cont-rating #| message: false #| warning: false -#| layout-nrow: 2 #| fig-cap: !expr fig_cap_yi_deviation_cont_rating -#| fig-height: 8 -#| echo: false +#| fig-height: 6 +#| fig-width: 8 yi_cont_plots <- yi_cont_plot_data$plot_data %>% - map(.f = ~ plot_continuous_rating(.x)) + map(.f = ~ plot_continuous_rating(.x)) %>% + purrr::set_names({yi_cont_plot_data %>% + unite(plot_name, dataset, estimate_type, sep = " ") %>% + pull(plot_name)}) -patchwork::wrap_plots(yi_cont_plots,heights = 4, byrow = TRUE) + +patchwork::wrap_plots(yi_cont_plots, heights = 4, byrow = TRUE) + patchwork::plot_annotation(tag_levels = 'A') ``` ```{r} #| label: tbl-yi-deviation-model-params #| tbl-cap: "Parameter estimates for univariate models of Box-Cox transformed deviation from the mean $y_i$ estimate as a function of categorical peer-review rating, continuous peer-review rating, and Sorensen's index for blue tit and *Eucalyptus* analyses, and also for the inclusion of random effects for *Eucalyptus* analyses." 
-#| column: page-right +#| column: page ManyEcoEvo_yi_viz %>% - filter(exclusion_set == "complete", - dataset == "blue tit", - model_name %nin% c("MA_mod", "box_cox_rating_cat_no_int")) %>% - bind_rows({euc_yi_results %>% - filter(model_name %nin% c("MA_mod", "box_cox_rating_cat_no_int"))}) %>% + filter( + model_name %nin% c("MA_mod", + "box_cox_rating_cat_no_int", + "MA_mod_mv")) %>% mutate( dataset = case_when(str_detect(dataset, "eucalyptus") ~ "Eucalyptus", TRUE ~ dataset), model_name = forcats::as_factor(model_name) %>% @@ -1006,46 +1068,50 @@ ManyEcoEvo_yi_viz %>% "box_cox_rating_cont", "sorensen_glm", "uni_mixed_effects")) %>% - forcats::fct_recode(`Deviation explained by categorical ratings` = "box_cox_rating_cat", - `Deviation explained by continuous ratings` = "box_cox_rating_cont", - `Deviation explained by Sorensen's index` = "sorensen_glm", - `Deviation explained by inclusion of random effects` = "uni_mixed_effects")) %>% + forcats::fct_recode( + `Deviation explained by categorical ratings` = "box_cox_rating_cat", + `Deviation explained by continuous ratings` = "box_cox_rating_cont", + `Deviation explained by Sorensen's index` = "sorensen_glm", + `Deviation explained by inclusion of random effects` = "uni_mixed_effects") + ) %>% semi_join( - {bind_rows(yi_singularity_convergence_sorensen_mixed_mod, - yi_convergence_singularity) %>% - filter(singularity == FALSE, convergence == TRUE, SD_calc == TRUE) }, + {yi_convergence_singularity %>% + filter(!singularity, + convergence, + SE_calc, + CI_calc) }, by = join_by("dataset", "estimate_type", "model_name") ) %>% - select(dataset, estimate_type, model_name, model) %>% - mutate(tbl_output = map(model, parameters::parameters) - ) %>% select(dataset, estimate_type, model_name, - tbl_output) %>% - unnest(tbl_output) %>% - mutate(dataset = case_when(str_detect(dataset, "eucalyptus") ~ "*Eucalyptus*", - TRUE ~ dataset), - Group = case_when(Group == "study_id" ~ "Effect ID", - Group == "ReviewerId" ~ "Reviewer ID", 
- TRUE ~ Group), - df_error = as.integer(df_error), - Parameter = str_remove(Parameter, "PublishableAsIs") %>% - str_replace("diversity", "Sorensen's") %>% - str_replace_all(., "_", " ") %>% - str_remove(., "1") %>% - Hmisc::capitalize() ) %>% + model_params) %>% + unnest(model_params) %>% + mutate( + Group = case_match(Group, + "study_id" ~ "Effect ID", + "ReviewerId" ~ "Reviewer ID", + "" ~ NA, + .default = Group), + df_error = as.integer(df_error), + Parameter = str_remove(Parameter, "PublishableAsIs") %>% + str_replace("diversity", "Sorensen's") %>% + str_replace_all(., "_", " ") %>% + str_remove(., "1") %>% + Hmisc::capitalize() ) %>% group_by(model_name) %>% - arrange(desc(model_name), + arrange(model_name, dataset, estimate_type) %>% select(-CI) %>% - dplyr::filter(dataset != "blue tit" | str_detect(model_name, "random", negate = TRUE)) %>% gt::gt(rowname_col = "dataset") %>% gt::fmt(columns = "p", fns = function(x) gtsummary::style_pvalue(x)) %>% gt::cols_label(CI_low = gt::md("95\\%CI"), - estimate_type = "Estimate Type") %>% - gt::cols_label(df_error = "df") %>% + estimate_type = "Prediction Scenario", + SE = gt::md("$\\text{SE}$"), + df_error = gt::md("$\\mathit{df}$"), + t = gt::md("$t$"), + p = gt::md("*p*")) %>% gt::cols_merge(columns = starts_with("CI_"), pattern = "[{1},{2}]") %>% gt::cols_move(columns = CI_low, after = SE) %>% @@ -1061,6 +1127,7 @@ ManyEcoEvo_yi_viz %>% gt::cols_move(columns = c(Effects, Group), after = Parameter) %>% gt::sub_missing(columns = c(Effects, Group, t, df_error, p), missing_text = "") %>% + gt::cols_hide(Effects) %>% gt::text_transform(fn = function(x) map(x, gt::md), locations = gt::cells_row_groups()) %>% gt::text_transform( @@ -1071,15 +1138,20 @@ ManyEcoEvo_yi_viz %>% paste0("") } ) %>% - gt::fmt(columns = c(Coefficient, SE, t, starts_with("CI_")) , - # rows = Parameter %in% c("RateAnalysis", "SD (Observations)", "mixed_model1"), - fns = function(x) ifelse(x < 0.0009, - format(x, nsmall = 2, digits = 1), - round(x, 
digits = 2))) %>% + gt::fmt_number(columns = c(Coefficient, SE, t, starts_with("CI_")), decimals = 2,drop_trailing_zeros = TRUE, drop_trailing_dec_mark = TRUE) %>% + gt::fmt_scientific(columns = c( starts_with("CI_")), + rows = abs(CI_low) < 0.01 | abs(CI_high) < 0.01 | abs(CI_low) > 1000 | abs(CI_high) > 1000, + decimals = 2) %>% + gt::fmt_scientific(columns = c( starts_with("Coefficient")), + rows = abs(Coefficient) < 0.01 | abs(Coefficient) > 1000, + decimals = 2) %>% + gt::fmt_scientific(columns = c( starts_with("SE")), + rows = abs(SE) < 0.01 | abs(SE) > 1000, + decimals = 2) %>% gt::tab_style(locations = gt::cells_stub(rows = str_detect(dataset, "Eucalyptus")), style = cell_text(style = "italic")) %>% - gt_fmt_yi("estimate_type") %>% - gt::as_raw_html() + gt::cols_label(Group = "Random Effect") %>% + gt_fmt_yi("estimate_type") ``` @@ -1095,33 +1167,40 @@ All models fitted without problem. #| message: false #| warning: false yi_sorensen_plot_data <- - ManyEcoEvo::ManyEcoEvo_yi_viz %>% - filter(exclusion_set == "complete", - dataset == "blue tit", - str_detect(model_name, "sorensen_glm")) %>% - bind_rows({euc_yi_results %>% - filter(model_name %in% c("sorensen_glm"))}) %>% - mutate( dataset = case_when(str_detect(dataset, "eucalyptus") ~ "Eucalyptus", - TRUE ~ dataset)) %>% + ManyEcoEvo_yi_viz %>% + filter(str_detect(model_name, "sorensen_glm")) %>% + mutate( dataset = + case_when(str_detect(dataset, "eucalyptus") ~ "Eucalyptus", + TRUE ~ dataset), + model_name = forcats::as_factor(model_name) %>% + forcats::fct_relevel(c("box_cox_rating_cat", + "box_cox_rating_cont", + "sorensen_glm", + "uni_mixed_effects")) %>% + forcats::fct_recode( + `Deviation explained by categorical ratings` = "box_cox_rating_cat", + `Deviation explained by continuous ratings` = "box_cox_rating_cont", + `Deviation explained by Sorensen's index` = "sorensen_glm", + `Deviation explained by inclusion of random effects` = "uni_mixed_effects")) %>% select(dataset, estimate_type, model_name, 
model) %>% semi_join( - {yi_singularity_convergence_sorensen_mixed_mod %>% - filter(str_detect(model_name, "Sorensen"), - singularity == FALSE)}, - by = join_by("dataset", "estimate_type") + {yi_convergence_singularity %>% + filter(!singularity, + convergence, + SE_calc, CI_calc) }, + by = join_by("dataset", "estimate_type", "model_name") ) %>% - mutate(dataset = case_when(dataset == "Eucalyptus" ~ paste0("*", dataset, "*"), - TRUE ~ Hmisc::capitalize(dataset)), - plot_data = map(model, ~ pluck(.x, "fit", "data") %>% - rename(box_cox_abs_deviation_score_estimate = ..y))) %>% + mutate( + plot_data = map(model, ~ pluck(.x, "fit", "data") %>% + rename(box_cox_abs_deviation_score_estimate = ..y))) %>% unite(plot_names, dataset, estimate_type, sep = ", ") yi_sorensen_subfigcaps <- yi_sorensen_plot_data$plot_names %>% - paste0(paste0(paste0("**", LETTERS[1:4], "**", sep = ""), sep = ": "), ., collapse = ", ") + paste0(paste0(paste0("**", LETTERS[1:length(yi_sorensen_plot_data$plot_names)], "**", sep = ""), sep = ": "), ., collapse = ", ") -yi_sorensen_fig_cap <- paste0("Scatter plots examining Box-Cox transformed deviation from the meta-analytic mean for $y_i$ estimates as a function of Sorensen's similarity index. Note that higher (less negative) values of the deviation score result from greater deviation from the meta-analytic mean. ", - yi_sorensen_plot_data$plot_names %>% paste0(paste0(paste0("**", LETTERS[1:6], "**", sep = ""), sep = ": "), ., collapse = ", "), +yi_sorensen_fig_cap <- paste0("Scatter plots examining Box-Cox transformed deviation from the meta-analytic mean for $y_i$ estimates as a function of Sorensen's similarity index. Note that higher (less negative) values of the deviation score result from greater deviation from the meta-analytic mean (@fig-box-cox-transformations). 
", + yi_sorensen_plot_data$plot_names %>% paste0(paste0(paste0("**", LETTERS[1:length(yi_sorensen_plot_data$plot_names)], "**", sep = ""), sep = ": "), ., collapse = ", "), ".") ``` @@ -1131,17 +1210,19 @@ yi_sorensen_fig_cap <- paste0("Scatter plots examining Box-Cox transformed devia #| layout-nrow: 2 #| fig-height: 8 #| message: false -#| echo: false yi_sorensen_plots <- map2(.x = yi_sorensen_plot_data$model, .y = yi_sorensen_plot_data$plot_data, - .f = ~ walk_plot_effects_diversity(model = .x, plot_data = .y)) + .f = ~ walk_plot_effects_diversity(model = .x, plot_data = .y)) %>% + purrr::set_names(yi_sorensen_plot_data$plot_names) patchwork::wrap_plots(yi_sorensen_plots,heights = 4, byrow = TRUE) + patchwork::plot_annotation(tag_levels = 'A') ``` +We checked the fitted models for the inclusion of random effects for the *Eucalyptus* dataset, and for models of deviation explained by Sorensen's similarity index for $y_i$ estimates (@tbl-deviation-similarity-convergence-singularity-yi). All models converged, and no singular fits were encountered. + ```{r} #| label: tbl-deviation-similarity-convergence-singularity-yi #| tbl-cap: "Singularity and convergence checks for models of deviation explained by Sorensen's similarity index and inclusion of random effects for out-of-sample predictions, $y_i$. Models of Deviation explained by inclusion of random effects are not presented for blue tit analyses because the number of models not using random effects was less than our preregistered threshold." 
@@ -1149,18 +1230,20 @@ patchwork::wrap_plots(yi_sorensen_plots,heights = 4, byrow = TRUE) + #| message: false yi_singularity_convergence_sorensen_mixed_mod %>% - filter(dataset != "blue tit" | str_detect(model_name, - "random", - negate = TRUE)) %>% - select(-params) %>% + drop_na(convergence) %>% + mutate(across(c(SE_calc, CI_calc, singularity), ~ ifelse(is_false(convergence), NA, .x))) %>% + select(-model_params) %>% + group_by(model_name) %>% gt::gt(rowname_col = "dataset") %>% gt::tab_style(locations = cells_body(rows = str_detect(dataset, "Eucalyptus"), columns = dataset), style = cell_text(style = "italic")) %>% gt::cols_label(dataset = "Dataset", - estimate_type = "Estimate Type", + estimate_type = "Prediction Scenario", singularity = "Singular Fit?", - convergence = "Model converged?") %>% + convergence = "Model converged?", + SE_calc = gt::md("Can random effects $\\text{SE}$ be calculated?"), + CI_calc = "Can random effects CI be calculated?") %>% gt::opt_stylize(style = 6, color = "gray") %>% tab_style( style = list( @@ -1169,12 +1252,16 @@ yi_singularity_convergence_sorensen_mixed_mod %>% ), locations = list( cells_body(columns = "singularity", rows = singularity == TRUE), - cells_body(columns = "convergence", rows = convergence == FALSE) - ) - ) %>% - gt::text_transform(fn = function(x) ifelse(x == TRUE, "yes", "no" ), + cells_body(columns = "convergence", rows = convergence == FALSE), + cells_body(columns = "SE_calc", rows = SE_calc == FALSE), + cells_body(columns = "CI_calc", rows = CI_calc == FALSE) + )) %>% + gt::text_transform(fn = function(x) ifelse(x == TRUE, "yes", + ifelse(x == FALSE, "no", x)), locations = cells_body(columns = c("singularity", - "convergence") + "convergence", + "SE_calc", + "CI_calc") )) %>% gt::text_transform( locations = cells_stub( @@ -1193,11 +1280,11 @@ yi_singularity_convergence_sorensen_mixed_mod %>% ### Out of sample predictions $y_i$ -Only `r ManyEcoEvo_yi$data[[1]] %>% select(id_col, mixed_model) %>% 
count(mixed_model) %>% filter(mixed_model == 0) %>% pluck("n")` of the Blue tit out-of-sample analyses ($y_i$) included random effects, which was below our preregistered threshold of 5 for running the models of Box-Cox transformed deviation from the meta-analytic mean explained by the inclusion of random-effects. However, `r ManyEcoEvo_yi$data[[2]] %>% select(id_col, mixed_model) %>% count(mixed_model) %>% filter(mixed_model == 0) %>% pluck("n")` *Eucalyptus* analyses included in the out-of-sample ($y_{i}$) results included only fixed effects, which crossed our pre-registered threshold. +Only `r ManyEcoEvo_yi$data[[1]] %>% select(id_col, mixed_model) %>% count(mixed_model) %>% filter(mixed_model == 0) %>% pluck("n")` of the Blue tit out-of-sample analyses $y_i$ included random effects, which was below our preregistered threshold of 5 for running the models of Box-Cox transformed deviation from the meta-analytic mean explained by the inclusion of random-effects. However, `r ManyEcoEvo_yi$data[[2]] %>% select(id_col, mixed_model) %>% count(mixed_model) %>% filter(mixed_model == 0) %>% pluck("n")` *Eucalyptus* analyses included in the out-of-sample $y_{i}$ results included only fixed effects, which crossed our pre-registered threshold. Consequently, we performed this analysis for the *Eucalyptus* dataset only, here we present results for the out of sample prediction $y_{i}$ results. -There is consistent evidence of somewhat higher Box-Cox-transformed deviation values for models including a random effect, meaning the models including random effects averaged slightly higher deviation from the meta-analytic means. -This is most evident for the $y_{50}$ predictions which both shows the greatest difference in Box-Cox transformed deviation values (@fig-yi-euc-deviation-RE-plots) and explains the most variation in $y_i$ deviation score (@tbl-yi-deviation-parameter-estimates). 
+There is inconsistent evidence of somewhat higher Box-Cox-transformed deviation values for models including a random effect, meaning the analyses of the *Eucalyptus* dataset that included random effects averaged slightly higher deviation from the meta-analytic mean out-of-sample estimate in the relevant prediction scenario. +This is most evident for the $y_{25}$ predictions which both shows the greatest difference in Box-Cox transformed deviation values (@fig-yi-euc-deviation-RE-plots) and explains the most variation in $y_i$ deviation score (@tbl-yi-deviation-model-params). ```{r} #| label: calc-yi-euc-deviation-RE-plots @@ -1205,16 +1292,19 @@ This is most evident for the $y_{50}$ predictions which both shows the greatest #| message: false yi_deviation_RE_plot_data <- - euc_yi_results %>% - filter(str_detect(model_name, "uni_mixed_effects")) %>% - select(dataset, estimate_type, model) %>% + ManyEcoEvo_yi_results %>% + mutate(dataset = Hmisc::capitalize(dataset)) %>% + semi_join({yi_singularity_convergence_sorensen_mixed_mod %>% filter(!singularity, convergence, SE_calc, CI_calc, str_detect(model_name, "random"))}, by = join_by(dataset, estimate_type)) %>% + select(dataset, estimate_type, model = uni_mixed_effects) %>% + rowwise() %>% + filter(!is_logical(model)) %>% ungroup %>% mutate(predictor_means = map(model, .f = ~ pluck(.x, "fit") %>% modelbased::estimate_means(.)), plot_data = map(model, pluck, "fit", "data"), plot_data = map(plot_data, rename, box_cox_abs_deviation_score_estimate = ..y)) %>% - mutate(dataset = Hmisc::capitalize(dataset) %>% paste0("*", ., "*")) %>% + mutate(dataset = case_when(str_detect(dataset, "Eucalyptus") ~ paste0("*", dataset, "*"), TRUE ~ dataset)) %>% unite(plot_names, dataset, estimate_type, sep = ", ") yi_deviation_RE_plot_subfigcaps <- yi_deviation_RE_plot_data %>% @@ -1233,13 +1323,13 @@ yi_deviation_RE_plot_figcap <- #| fig-cap: !expr yi_deviation_RE_plot_figcap #| layout-nrow: 1 #| fig-width: 8 -#| column: page-inset-right 
-#| echo: false +#| column: body-outset yi_deviation_RE_plots <- yi_deviation_RE_plot_data %>% map2(.x = .$plot_data, .y = .$predictor_means, - .f = ~ plot_model_means_RE(.x, mixed_model, .y)) + .f = ~ plot_model_means_RE(.x, mixed_model, .y)) %>% + set_names(yi_deviation_RE_plot_subfigcaps) patchwork::wrap_plots(yi_deviation_RE_plots, byrow = TRUE) + patchwork::plot_annotation(tag_levels = 'A') + @@ -1255,55 +1345,15 @@ patchwork::wrap_plots(yi_deviation_RE_plots, byrow = TRUE) + ```{r} #| label: multivariate-models-mod-summary -fit_multivar_wrap <- function(data_tbl, ..., env = rlang::caller_env()){ - f1 <- rlang::new_formula(expr(box_cox_abs_deviation_score_estimate), - expr(RateAnalysis + - PublishableAsIs + - mean_diversity_index + - (1 | ReviewerId)), env = env) - - f2 <- rlang::new_formula(expr(box_cox_abs_deviation_score_estimate), - expr(RateAnalysis + - PublishableAsIs + - mean_diversity_index + - mixed_model + - (1 | ReviewerId)), env = env) - - pass_threshold <- - data_tbl %>% - count(mixed_model) %>% - pointblank::test_col_vals_gte(n, 5) - - if (pass_threshold == TRUE) { - cli::cli_alert_info("Model with random effects included") - } else ( - cli::cli_alert_info("Model with random effects excluded") - ) - #TODO MAKE SURE GIVES CORRECT EX - f <- if (pass_threshold) f2 else f1 # MAKE SURE RETURNS APPROPIRATELY - - mod <- inject(lme4::lmer(!!f, data = data_tbl, ...)) - - return(mod) - -} - filter_vars <- rlang::exprs(exclusion_set == "complete", expertise_subset == "All", publishable_subset == "All", collinearity_subset == "All") multivar_mods <- - ManyEcoEvo_results %>% - dplyr::filter(!!!filter_vars) %>% - group_by(dataset) %>% - select(dataset, effects_analysis) %>% - mutate(effects_analysis = map(effects_analysis, ~ .x %>% - unnest(review_data))) %>% - mutate(model = map(effects_analysis, fit_multivar_wrap)) %>% - mutate(model_perf = map(model, performance::model_performance)) %>% - mutate(model_params = map(model, parameters::parameters)) %>% - 
hoist(model_perf, "R2_conditional", "R2_marginal", "Sigma") + ManyEcoEvo_viz %>% + dplyr::filter(!!!filter_vars, model_name == "MA_mod_mv") %>% + hoist(mod_fit_stats, "R2_conditional", "R2_marginal", "Sigma") bt_multivar_mod_R <- multivar_mods %>% @@ -1323,33 +1373,50 @@ euc_multivar_mod_R <- bt_multivar_mod_sigma <- multivar_mods %>% filter(dataset == "blue tit") %>% - pluck("Sigma") + round_pluck("Sigma") euc_multivar_mod_sigma <- multivar_mods %>% filter(dataset == "eucalyptus") %>% - pluck("Sigma") + round_pluck("Sigma") ``` ```{r} #| label: tbl-multivariate-models-coefs -#| echo: false #| message: false -#| column: page-right -#| tbl-cap: "Parameter estimates from models explaining Box-Cox transformed deviation scores from the mean $Z_r$ as a function of continuous and categorical peer-review ratings in multivariate analyses. Standard Errors (SE), 95% confidence intervals (95% CI) are reported for all estimates, while t values, degrees of freedom and p-values are presented for fixed-effects." +#| column: body-outset +#| tbl-cap: "Parameter estimates from models explaining Box-Cox transformed deviation scores from the mean $Z_r$ as a function of continuous and categorical peer-review ratings in multivariate analyses. Standard Errors ($SE$), 95% confidence intervals (95% CI) are reported for all estimates, while $\\mathit{t}$ values, degrees of freedom ($\\mathit{df}$) and $p$-values are presented for fixed-effects." 
multivar_mods %>% select(dataset, model_params) %>% unnest(model_params) %>% select(-CI) %>% mutate( - dataset = str_replace(dataset, "eucalyptus", "*Eucalyptus*"), - Parameter = str_replace(Parameter, "mixed_model", "random_included")) %>% + dataset = + str_replace(dataset, "eucalyptus", "*Eucalyptus*"), + Parameter = + str_replace(Parameter, "mixed_model", "random_included")) %>% + group_by(dataset) %>% gt::gt() %>% + gt::fmt_number(columns = c(Coefficient, SE, starts_with("CI_"), t), + decimals = 2, + drop_trailing_zeros = TRUE, + drop_trailing_dec_mark = TRUE) %>% + gt::fmt_scientific( + columns = c( starts_with("CI_")), + rows = abs(CI_low) < 0.01 | abs(CI_high) < 0.01 | abs(CI_low) > 1000 | abs(CI_high) > 1000, + decimals = 2) %>% + gt::fmt_scientific( + columns = c( starts_with("Coefficient")), + rows = abs(Coefficient) < 0.01 | abs(Coefficient) > 1000, + decimals = 2) %>% gt::fmt(columns = "p", - fns = function(x) gtsummary::style_pvalue(x, prepend_p = TRUE)) %>% - gt::fmt(columns = function(x) rlang::is_bare_numeric(x),fns = function(x) round(x, 3)) %>% + fns = function(x) gtsummary::style_pvalue(x, + prepend_p = TRUE) + ) %>% gt::cols_label(CI_low = gt::md("95\\%CI"), - df_error = "df") %>% + df_error = "df", + p = gt::md("*p*"), + SE = gt::md("$\\text{SE}$")) %>% gt::cols_merge(columns = starts_with("CI_"), pattern = "[{1},{2}]") %>% gt::cols_move(columns = CI_low, after = SE) %>% @@ -1365,7 +1432,9 @@ multivar_mods %>% gt::text_transform(fn = function(x) str_replace(x, "ReviewerId", "Reviewer ID")) %>% gt::text_transform(fn = function(x) map(x, gt::md), locations = gt::cells_row_groups()) %>% - gt::sub_missing(missing_text = "") + gt::sub_missing(missing_text = "") %>% + gt::cols_hide(Effects) %>% + gt::cols_label(Group = "Random Effect") multivar_mod_tidy <- multivar_mods %>% pull(model, name = "dataset") %>% @@ -1377,7 +1446,7 @@ multivar_performance_tidy <- multivar_mods %>% ``` The multivariate models did a poor job of explaining how different from 
the meta-analytic mean each analysis would be. -For the blue tit analyses the $R^{2}$ value for the whole model was `r round(bt_multivar_mod_R[2],2)` and for the fixed effects component was `r round (bt_multivar_mod_R[1],2)`, and the residual standard deviation for the model was `r round(bt_multivar_mod_sigma,2)`. +For the blue tit analyses the $R^{2}$ value for the whole model was `r round_pluck(bt_multivar_mod_R, "R2_conditional")` and for the fixed effects component was `r round_pluck(bt_multivar_mod_R, "R2_marginal")`, and the residual standard deviation for the model was `r round(bt_multivar_mod_sigma,2)`. Further, all of the fixed effects had 95% confidence intervals that overlapped 0. This evidence is all consistent with none of the predictor variables in this model (continuous review rating, categorical review rating, distinctiveness of variables included) having any meaningful effect on how far $Z_r$ estimates fell from the meta-analytic mean for the blue tit analyses. The pattern is largely similar for the *Eucalyptus* multivariate analysis, in which $R^{2}$ for the whole model was `r round(euc_multivar_mod_R[2],2)` and for the fixed effects component was `r round (euc_multivar_mod_R[1],2)`, and the residual standard deviation for the model was `r round(euc_multivar_mod_sigma,2)`. @@ -1395,8 +1464,9 @@ We report this analysis only for the sake of transparency. 
multivar_performance_tidy %>% select(dataset, starts_with("R2_"), ICC, RMSE, Sigma) %>% - mutate(dataset = case_when(str_detect(dataset, "eucalyptus") ~ "Eucalyptus", - TRUE ~ dataset)) %>% + mutate(dataset = + case_when(str_detect(dataset, "eucalyptus") ~ "Eucalyptus", + TRUE ~ dataset)) %>% gt::gt() %>% gt::fmt(columns = function(x) rlang::is_bare_numeric(x), fns = function(x) round(x, 2)) %>% @@ -1405,153 +1475,147 @@ multivar_performance_tidy %>% R2_marginal = gt::md("$$R^{2}_\\text{Marginal}$$"), Sigma = gt::md("$$\\sigma$$"), dataset = "Dataset") %>% - gt::tab_style(locations = cells_body(rows = str_detect(dataset, "Eucalyptus"), - columns = dataset), + gt::tab_style(locations = + cells_body(rows = str_detect(dataset, "Eucalyptus"), + columns = dataset), style = cell_text(style = "italic")) %>% - gt::as_raw_html() + gt::fmt_number(columns = c(R2_conditional, R2_marginal, ICC, Sigma), + decimals = 2, + drop_trailing_zeros = TRUE, + drop_trailing_dec_mark = TRUE) ``` ### Out of sample predictions $y_i$ +For the blue tit analyses, the only models that converged, were not singular, and had estimable random effect variances were the $y_{50}$ and $y_{75}$ prediction scenarios with Reviewer ID as the model random effect (@tbl-yi-multivar-singularity-convergence). Of the different random effects structures we trialled for the *Eucalyptus* analyses, only the model that included Reviewer ID as the random effect successfully fitted to the $y_{50}$ and $y_{75}$ prediction scenarios, with other models failing to converge due to complete separation (`lme4::` error: `Downdated VtV is not positive definite`). + +```{r} +#| label: tbl-yi-multivar-singularity-convergence +#| tbl-cap: "Singularity and convergence checks for all combinations of random effects specifications trialled across subsets of out of sample predictions $y_i$ from multivariate models." 
+#| seed: 1234 -formula_study_id <- workflow() %>% - add_variables(outcomes = box_cox_abs_deviation_score_estimate, - predictors = c(publishable_as_is, - rate_analysis, - mean_diversity_index, - mixed_model, - study_id)) %>% - add_model(model, - formula = box_cox_abs_deviation_score_estimate ~ publishable_as_is + rate_analysis + mean_diversity_index + mixed_model + (1 | study_id )) - -formula_ReviewerId <- workflow() %>% - add_variables(outcomes = box_cox_abs_deviation_score_estimate, - predictors = c(publishable_as_is, - rate_analysis, - mean_diversity_index, - mixed_model, - reviewer_id)) %>% - add_model(model, - formula = box_cox_abs_deviation_score_estimate ~ publishable_as_is + rate_analysis + mean_diversity_index + mixed_model + (1 | reviewer_id )) - -formula_both <- workflow() %>% - add_variables(outcomes = box_cox_abs_deviation_score_estimate, - predictors = c(publishable_as_is, - rate_analysis, - mean_diversity_index, - mixed_model, - reviewer_id, - study_id)) %>% - add_model(model, - formula = box_cox_abs_deviation_score_estimate ~ publishable_as_is + rate_analysis + mean_diversity_index + mixed_model + (1 | study_id) + (1 | reviewer_id)) - -# ---- Create DF for combinatorial model specification ---- possibly_parameters <- possibly(parameters::parameters, otherwise = NA) poss_extract_fit_engine <- possibly(extract_fit_engine, otherwise = NA) -model_vars_multivar <- - bind_rows( - tidyr::expand_grid(outcome = "box_cox_abs_deviation_score_estimate", - random_intercepts = c("study_id", - "reviewer_id")) %>% - rowwise() %>% - mutate(random_intercepts = as.list(random_intercepts)), - tidyr::expand_grid(outcome = "box_cox_abs_deviation_score_estimate", - random_intercepts = c("study_id", - "reviewer_id")) %>% - group_by(outcome) %>% - reframe(random_intercepts = list(random_intercepts)) - ) %>% - mutate(fixed_effects = list(c("publishable_as_is", - "rate_analysis", - "mean_diversity_index", - "mixed_model"))) +# ---- Create DF for combinatorial model 
specification ---- + +model_formulas_multivar <- + tidyr::expand_grid(outcome = "box_cox_abs_deviation_score_estimate", + random_intercepts = list("study_id", + "reviewer_id", + c("study_id", + "reviewer_id")), + fixed_effects = list(c("publishable_as_is", + "rate_analysis", + "mean_diversity_index", + "mixed_model"), + c("publishable_as_is", + "rate_analysis", + "mean_diversity_index"))) %>% + rowwise() %>% + mutate(dataset = case_when(length(fixed_effects) == 4 ~ "eucalyptus", + TRUE ~ "blue tit"), + wflow_id = paste0("RE:", + paste0(random_intercepts, collapse = "_"))) %>% + unite(wflow_id, dataset, wflow_id, remove = FALSE) %>% + rowwise() %>% + mutate(model_formulas = + list(create_model_formulas(outcome, + fixed_effects, + random_intercepts)) %>% + set_names(wflow_id), + model_workflows = list(create_model_workflow(outcome, + fixed_effects, + random_intercepts)) %>% + set_names(wflow_id)) all_model_fits_multivar <- ManyEcoEvo_yi_results %>% - filter(dataset == "blue tit", exclusion_set == "complete") %>% - ungroup %>% select(dataset, estimate_type, effects_analysis) %>% - bind_rows({deviation_models_yi_euc %>% - ungroup %>% - select(dataset, estimate_type, effects_analysis) }) %>% - cross_join(model_vars_multivar) %>% + group_by(dataset, estimate_type) %>% + nest_join(model_formulas_multivar %>% + select(dataset, + model_workflows, + fixed_effects, + random_intercepts), + by = join_by(dataset), + name = "model_workflow_sets") %>% + unnest(model_workflow_sets) %>% + rowwise() %>% mutate(effects_analysis = - map(effects_analysis, - mutate, - weight = importance_weights(1/box_cox_var)), - effects_analysis = - map(effects_analysis, - ~ .x %>% - unnest(review_data) %>% - select(study_id, - starts_with("box_cox_abs_dev"), - RateAnalysis, - PublishableAsIs, - ReviewerId, - box_cox_var, - weight, - mean_diversity_index, - mixed_model) %>% - janitor::clean_names() %>% - mutate_if(is.character, factor) - ), - model_workflows = pmap(.l = list(outcome, - fixed_effects, - 
random_intercepts), - .f = create_model_workflow), - fitted_mod_workflow = map2(model_workflows, - effects_analysis, - poss_fit), - fitted_model = map(fitted_mod_workflow, poss_extract_fit_engine), - convergence = map_if(fitted_model, - ~ !is.na(.x), - possibly_check_convergence) %>% - as.logical(), - singularity = map_if(fitted_model, - ~ !is.na(.x), - possibly_check_singularity) %>% - as.logical(), - params = map_if(fitted_model, - ~ !is.na(.x), - parameters::parameters), - fixed_effects = map_chr(fixed_effects, paste0, collapse = ", ") + list(effects_analysis %>% + select(study_id, + starts_with("box_cox_abs_dev"), + RateAnalysis, + PublishableAsIs, + ReviewerId, + box_cox_var, + mean_diversity_index, + mixed_model) %>% + janitor::clean_names() %>% + mutate_if(is.character, factor)), + fitted_mod_workflow = list(poss_fit(model_workflows, + effects_analysis)), + fitted_model = list(poss_extract_fit_engine(fitted_mod_workflow)), + convergence = list(if (!is.na(fitted_model)) + possibly_check_convergence(fitted_model)), + singularity = list(if (!is.na(fitted_model)) + possibly_check_singularity(fitted_model)), + params = list(if (!is.na(fitted_model)) + possibly_parameters(fitted_model)), + fixed_effects = paste0(fixed_effects, collapse = ", ") ) %>% unnest_wider(random_intercepts, names_sep = "_") %>% - select(-outcome, - -model_workflows, - -fitted_mod_workflow, - -effects_analysis, - estimate_type) %>% - replace_na(list(convergence = FALSE, singularity = TRUE)) + unnest(c(convergence, singularity)) %>% + rowwise() %>% + replace_na(list(convergence = FALSE)) %>% + select(-model_workflows, -fitted_mod_workflow, -effects_analysis) yi_multivar_singularity_convergence <- all_model_fits_multivar %>% left_join({all_model_fits_multivar %>% unnest(params) %>% filter(Effects == "random") %>% - filter(is.infinite(CI_high) | is.na(SE)) %>% + filter(is.na(SE) | is.infinite(SE)) %>% distinct(fixed_effects, random_intercepts_1, random_intercepts_2, dataset, - estimate_type, - 
convergence, - singularity) %>% - mutate(SD_calc = FALSE)}) %>% - mutate(model_na = is.na(fitted_model), - SD_calc = ifelse(is.na(fitted_model), FALSE, SD_calc), - SD_calc = ifelse(is.na(SD_calc), TRUE, SD_calc)) - -# If singularity == FALSE and convergence == TRUE, but the model appears here, then that's because + estimate_type) %>% + mutate(SE_calc = FALSE)}, + by = join_by(dataset, + estimate_type, + random_intercepts_1, + random_intercepts_2, + fixed_effects)) %>% + left_join({all_model_fits_multivar %>% + unnest(params) %>% + filter(Effects == "random") %>% + filter(if_any(contains("CI"), + list(is.infinite, is.na))) %>% + distinct(fixed_effects, + random_intercepts_1, + random_intercepts_2, + dataset, + estimate_type) %>% + mutate(CI_calc = FALSE)}, + by = join_by(dataset, + estimate_type, + random_intercepts_1, + random_intercepts_2, + fixed_effects)) %>% + rowwise() %>% + mutate(across(c(SE_calc, CI_calc), ~ ifelse(is.na(.x), TRUE, .x)), + across(c(SE_calc, CI_calc, singularity), + ~ ifelse(is_false(convergence), NA, .x))) + +# If singularity == FALSE and convergence == TRUE, +# but the model appears here, then that's because # the SD and CI's couldn't be estimated by parameters:: yi_multivar_singularity_convergence %>% - select(-fixed_effects, -fitted_model, -params, -model_na) %>% + select(-fixed_effects, -fitted_model, -params) %>% arrange(random_intercepts_1, random_intercepts_2, dataset, @@ -1571,18 +1635,23 @@ yi_multivar_singularity_convergence %>% locations = list( cells_body(columns = "singularity", rows = singularity == TRUE), cells_body(columns = "convergence", rows = convergence == FALSE), #TODO why didn't work here?? 
- cells_body(columns = "SD_calc", rows = SD_calc == FALSE) + cells_body(columns = "SE_calc", rows = SE_calc == FALSE), + cells_body(columns = "CI_calc", rows = CI_calc == FALSE) ) ) %>% - gt::text_transform(fn = function(x) ifelse(x == TRUE, "yes", "no" ), + gt::text_transform(fn = function(x) ifelse(x == TRUE, "yes", + ifelse(x == FALSE, "no", x)), locations = cells_body(columns = c("singularity", "convergence", - "SD_calc"))) %>% + "SE_calc", + "CI_calc"))) %>% gt::opt_stylize(style = 6, color = "gray") %>% gt::cols_label(dataset = "Dataset", singularity = "Singular Fit?", convergence = "Model converged?", - SD_calc = "Can random effect SE be calculated?") %>% + SE_calc = gt::md("Can random effects $\\text{SE}$ be calculated?"), + CI_calc = "Can random effect CI be calculated?" + ) %>% gt::tab_spanner(label = "Random Effects", columns = gt::starts_with("random")) %>% gt::sub_missing() %>% @@ -1593,66 +1662,25 @@ yi_multivar_singularity_convergence %>% gt_fmt_yi(columns = "estimate_type") ``` -For the blue tit analyses, the only models that did converged, were not singular and had estimable random effect variances were the $y_{25}$ and $y_{50}$ prediction scenarios with Reviewer ID as the model random effect, and the $y_{50}$ scenario with Study ID as the random effect (@tbl-yi-multivar-singularity-convergence). - -Of the different random effects structures we trialled for the *Eucalyptus* analyses, only the model with Study ID sa the random effect successfully fitted to the $y_{25}$ prediction scenario, with models either failing to converge due to complete separation (`lme4::` error: `Downdated VtV is not positive definite`, see ). 
- -Consequently, we deviated from our intended plan of using random effects for both Effect ID and Reviewer ID, instead using a single random effect for Reviewer ID for the $y_{25}$ and $y_{50}$ prediction scnearios for the blue tit datasets, and Study ID for the $y_{25}$ scenario for the *Eucalyptus* analysis (@tbl-BT-yi-multivar-summary, @tbl-BT-yi-multivar-params). - -```{r} -#| label: tbl-BT-yi-multivar-summary -#| tbl-cap: "Model summary statistic for non-singular, converging multivariate models fit to out-of-sample predictions." -yi_multivar_singularity_convergence %>% - select(-params) %>% - filter(SD_calc == TRUE) %>% - mutate(broom_summary = - map(fitted_model, broom.mixed::glance), - performance_summary = - map(fitted_model, performance::performance)) %>% - unnest(c(performance_summary, - broom_summary), names_sep = "-") %>% - select(dataset, estimate_type, random_intercepts_1, - contains(c( "RMSE", "sigma", "R2", "nobs", "ICC")), - -contains("AICc")) %>% - rename_with(~ str_remove(.x, "performance_summary-") %>% - str_remove("broom_summary-")) %>% - select(-sigma) %>% - relocate(nobs, .after = "ICC") %>% - gt::gt(groupname_col = "dataset", rowname_col = "estimate_type") %>% - gt::opt_stylize(style = 6, color = "gray") %>% - gt::cols_label(estimate_type = "Prediction Scenario", - random_intercepts_1 = "Random Effect", - R2_conditional = gt::md("$$R^{2}_\\text{Conditional}$$"), - R2_marginal = gt::md("$$R^{2}_\\text{Marginal}$$"), - Sigma = gt::md("$$\\sigma$$"), - dataset = "Dataset", - nobs = gt::md("$N_{Obs}$")) %>% - gt::tab_style(locations = cells_body(rows = str_detect(dataset, "Eucalyptus"), - columns = dataset), - style = cell_text(style = "italic")) %>% - gt::cols_hide(dataset) %>% - gt_fmt_yi(columns = "estimate_type") %>% - gt::fmt_scientific(columns = c("RMSE", "Sigma"), - decimals = 2) %>% - gt::fmt_number(columns = c(gt::starts_with("R2"), "ICC"), - decimals = 2) %>% - gt::tab_style(style = cell_text(style = "italic", transform = 
"capitalize"), - locations = cells_row_groups(groups = "eucalyptus")) %>% - gt::as_raw_html() -``` +Consequently, we deviated from our intended plan of using random effects for both Effect ID and Reviewer ID, instead using a single random effect for Reviewer ID for the $y_{50}$ and $y_{75}$ prediction scenarios for both blue tit and *Eucalyptus* datasets (@tbl-BT-yi-multivar-summary, @tbl-BT-yi-multivar-params). ```{r} #| label: tbl-BT-yi-multivar-params #| tbl-cap: "Parameter estimates for converging, non-singular multivariate models fitted to blue tit out-of-sample-prediction estimates $y_i$." -#| column: page-right +#| column: page yi_multivar_singularity_convergence %>% - filter(SD_calc == TRUE) %>% + filter(SE_calc == TRUE) %>% filter(random_intercepts_1 != "study_id" | dataset != "blue tit") %>% #rm eliminated modl select(dataset, estimate_type, params) %>% unnest(params) %>% relocate(c(Effects, Group), .after = Parameter) %>% gt::gt(rowname_col = "estimate_type", groupname_col = "dataset") %>% + gt::fmt_number(columns = c(-dataset, -estimate_type), + decimals = 2, + drop_trailing_zeros = TRUE, + drop_trailing_dec_mark = TRUE + ) %>% gt::text_transform(fn = function(x) str_replace(x, "publishable_as_is", "Categorical Peer Rating") %>% str_replace(., "rate_analysis", "Continuous Peer Rating") %>% str_replace(., "mean_diversity_index", "Sorensen's Index") %>% @@ -1660,8 +1688,6 @@ yi_multivar_singularity_convergence %>% locations = cells_body(columns = c("Parameter"))) %>% gt::opt_stylize(style = 6, color = "gray") %>% gt::sub_missing(missing_text = "") %>% - gt::fmt(columns = function(x) rlang::is_bare_numeric(x), - fns = function(x) format(x, digits = 3)) %>% gt::fmt(columns = "p", fns = function(x) gtsummary::style_pvalue(x)) %>% gt::text_transform( @@ -1672,65 +1698,133 @@ yi_multivar_singularity_convergence %>% paste0("") } ) %>% - gt::text_transform(locations = cells_body(columns = Group, rows = Group %in% c("reviewer_id", "study_id")), + 
gt::text_transform(locations = + cells_body(columns = Group, + rows = Group %in% c("reviewer_id", "study_id")), fn = function(x){ str_replace(x, "_", " ") %>% Hmisc::capitalize() %>% str_replace("id", "ID") }) %>% - gt::cols_label(CI_low = gt::md("95\\%CI")) %>% - gt::cols_label(df_error = "df") %>% - gt::cols_merge(columns = starts_with("CI_"), - pattern = "[{1},{2}]") %>% + gt::cols_label(CI_low = gt::md("95\\%CI"), df_error = "df", p = gt::md("*p*"), SE = gt::md("$\\text{SE}$")) %>% gt::tab_style(style = cell_text(style = "italic", transform = "capitalize"), locations = cells_row_groups(groups = "eucalyptus")) %>% + gt_fmt_yi(columns = "estimate_type") %>% + fmt_number(columns = c(gt::contains("CI"), "SE", "t"), + drop_trailing_zeros = TRUE, + drop_trailing_dec_mark = TRUE, + decimals = 2) %>% + gt::fmt_scientific(columns = c("Coefficient"), + rows = abs(Coefficient) < 0.01 | abs(Coefficient) > 1000, + decimals = 2) %>% + gt::fmt_scientific(columns = c("SE"), + rows = abs(SE) < 0.01 | abs(SE) > 1000, + decimals = 2) %>% + gt::fmt_scientific(columns = t, + rows = abs(t) < 0.01, + decimals = 2) %>% + gt::fmt_scientific(columns = CI_low, + rows = abs(CI_low) < 0.01 | abs(CI_low) > 1000) %>% + gt::fmt_scientific(columns = CI_high, + rows = abs(CI_high) < 0.01 | abs(CI_high) > 1000, + decimals = 2) %>% + gt::cols_hide(Effects) %>% + gt::cols_merge(columns = starts_with("CI_"), + pattern = "[{1},{2}]") %>% gt::cols_hide("CI") %>% + gt::cols_label(Group = "Random Effect") +``` + + +```{r} +#| label: tbl-BT-yi-multivar-summary +#| tbl-cap: "Model summary statistics for non-singular, converging multivariate models fit to out-of-sample estimates $y_i$." 
+ +ManyEcoEvo_yi_viz %>% + filter(model_name == "MA_mod_mv") %>% + rowwise() %>% + mutate(converged = + possibly_check_convergence(model), + singularity = possibly_check_singularity(model)) %>% + select(dataset, estimate_type, mod_fit_stats, mod_glance) %>% + hoist(mod_fit_stats, "RMSE", "Sigma", "R2_conditional", "R2_marginal", "ICC") %>% + hoist(mod_glance, "nobs") %>% + select(-mod_glance, -mod_fit_stats) %>% + semi_join({ManyEcoEvo_yi_viz %>% + filter(model_name == "MA_mod_mv") %>% + rowwise() %>% + transmute(dataset, + estimate_type, + converged = possibly_check_convergence(model), + singularity = possibly_check_singularity(model)) %>% + filter(converged, !singularity)}, + by = join_by(dataset, estimate_type)) %>% + relocate(nobs, .after = "ICC") %>% + gt::gt(groupname_col = "dataset", rowname_col = "estimate_type") %>% + gt::opt_stylize(style = 6, color = "gray") %>% + gt::cols_label(estimate_type = "Prediction Scenario", + R2_conditional = gt::md("$$R^{2}_\\text{Conditional}$$"), + R2_marginal = gt::md("$$R^{2}_\\text{Marginal}$$"), + Sigma = gt::md("$$\\sigma$$"), + dataset = "Dataset", + nobs = gt::md("$N_{Obs}$")) %>% + gt::tab_style(locations = cells_body(rows = str_detect(dataset, "Eucalyptus"), + columns = dataset), + style = cell_text(style = "italic")) %>% + gt::cols_hide(dataset) %>% gt_fmt_yi(columns = "estimate_type") %>% - gt::fmt_scientific(columns = c("Coefficient", "SE", "t", gt::contains("CI")), - decimals = 2) + gt::fmt_number(columns = c(gt::starts_with("R2"), "ICC", "Sigma", "RMSE"), + drop_trailing_zeros = TRUE, + drop_trailing_dec_mark = TRUE, + decimals = 2) %>% + gt::fmt_scientific(columns = c("RMSE"), + rows = abs(RMSE) < 0.01 | abs(RMSE) > 1000, + decimals = 2) %>% + gt::fmt_scientific(columns = c("Sigma"), + rows = abs(Sigma) < 0.01 | abs(Sigma) > 1000, + decimals = 2) %>% + gt::tab_style(style = cell_text(style = "italic", transform = "capitalize"), + locations = cells_row_groups(groups = "eucalyptus")) ``` + ## Model Summary 
Metrics for out-of-sample predictions $y_i$ {#sec-yi-summary} ```{r} #| label: calc-tbl-yi-deviation-parameter-estimates -#| echo: false #| warning: false #| message: false #| include: false all_yi_deviation_models <- ManyEcoEvo_yi_viz %>% - filter(exclusion_set == "complete", - dataset == "blue tit", - model_name %nin% c("MA_mod", "box_cox_rating_cat_no_int")) %>% - bind_rows({euc_yi_results %>% - filter(model_name %nin% c("MA_mod", "box_cox_rating_cat_no_int"))}) %>% + filter(model_name %nin% c("MA_mod", "box_cox_rating_cat_no_int", "MA_mod_mv")) %>% mutate( dataset = case_when(str_detect(dataset, "eucalyptus") ~ "Eucalyptus", TRUE ~ dataset)) %>% semi_join( - {yi_singularity_convergence_sorensen_mixed_mod %>% - filter(singularity == FALSE)}, + {yi_convergence_singularity %>% filter(convergence, !singularity, SE_calc, CI_calc) %>% select(-convergence, -singularity, -ends_with("_calc"))}, by = join_by("dataset", "estimate_type") - ) - -tbl_data_yi_deviation_model_params <- all_yi_deviation_models %>% - mutate(perf = map(model, performance::performance), - mod_sum_stats = map(model, broom.mixed::glance) ) %>% - select(dataset, estimate_type, model_name, perf, mod_sum_stats) %>% - hoist(mod_sum_stats, "nobs") %>% - unnest(cols = perf) %>% - select(-mod_sum_stats) %>% + rowwise() %>% + filter(!is_logical(model)) + +tbl_data_yi_deviation_model_params <- + all_yi_deviation_models %>% + group_by(dataset, estimate_type, model_name) %>% + select(mod_glance, mod_fit_stats) %>% + hoist(mod_fit_stats, "RMSE", "Sigma", "R2_conditional", "R2_marginal", "ICC", "R2") %>% + hoist(mod_glance, "nobs") %>% select(-mod_fit_stats, -mod_glance) %>% mutate(model_name = forcats::as_factor(model_name), model_name = forcats::fct_relevel(model_name, c("box_cox_rating_cat", "box_cox_rating_cont", "sorensen_glm", "uni_mixed_effects")), - model_name = forcats::fct_recode(model_name, - `Deviation explained by categorical ratings` = "box_cox_rating_cat", - `Deviation explained by continuous 
ratings` = "box_cox_rating_cont", - `Deviation explained by Sorensen's index` = "sorensen_glm", - `Deviation explained by inclusion of random effects` = "uni_mixed_effects")) %>% + model_name = + forcats::fct_recode( + model_name, + `Deviation explained by categorical ratings` = "box_cox_rating_cat", + `Deviation explained by continuous ratings` = "box_cox_rating_cont", + `Deviation explained by Sorensen's index` = "sorensen_glm", + `Deviation explained by inclusion of random effects` = "uni_mixed_effects")) %>% group_by(model_name) %>% relocate("R2", .before = starts_with("R2_")) ``` @@ -1739,20 +1833,18 @@ tbl_data_yi_deviation_model_params <- all_yi_deviation_models %>% #| label: tbl-yi-deviation-parameter-estimates #| message: false #| warning: false -#| tbl-cap: "Model summary metrics for models of Box-Cox transformed deviation from the mean $y_i$ estimate as a function of categorical peer-review rating, continuous peer-review rating, and Sorensen's index for blue tit and *Eucalyptus* analyses, and also for the inclusion of random effects for *Eucalyptus* analyses. Coefficient of determination, $R^2$, is reported for models of deviation as a function of Sorensen diversity scores and presence of random effects, while $R^{2}_\\text{Conditional}$, $R^{2}_\\text{Marginal}$ and the intra-class correlation (ICC) are reported for models of deviation as explained by peer-review ratings. For all models the residual standard deviation $\\sigma$, root mean squared error (RMSE) were calculated. The number of observations ($N_{Obs.}$) is displayed for reference." -#| column: page-inset-right +#| tbl-cap: "Model summary metrics for models of Box-Cox transformed deviation from the mean $y_i$ estimate as a function of categorical peer-review rating, continuous peer-review rating, and Sorensen's index for blue tit and *Eucalyptus* analyses, and also for the inclusion of random effects for *Eucalyptus* analyses. 
Coefficient of determination, $R^2$, is reported for models of deviation as a function of Sorensen diversity scores and presence of random effects, while $R^{2}_\\text{Conditional}$, $R^{2}_\\text{Marginal}$ and the intra-class correlation (ICC) are reported for models of deviation as explained by peer-review ratings. For all models the residual standard deviation $\\sigma$, root mean squared error (RMSE) were calculated. The number of observations ($N_{\\text{Obs.}}$) is displayed for reference." +#| column: page tbl_data_yi_deviation_model_params %>% - select(!c(contains("AIC"), BIC)) %>% - dplyr::filter(dataset != "blue tit" | str_detect(model_name, "random", negate = TRUE)) %>% gt::gt(rowname_col = "dataset") %>% gt::opt_stylize(style = 6, color = "gray") %>% gt::sub_missing(missing_text = "") %>% gt::cols_label(dataset = "Dataset", R2 = gt::md("$$R^2$$"), - R2_conditional = "$$R^{2}_\\text{Conditional}$$", - R2_marginal = "$$R^{2}_\\text{Marginal}$$", - Sigma = "$$\\sigma$$", - nobs = "$$N_{Obs.}$$", + R2_conditional = gt::md("$$R^{2}_\\text{Conditional}$$"), + R2_marginal = gt::md("$$R^{2}_\\text{Marginal}$$"), + Sigma = gt::md("$$\\sigma$$"), + nobs = gt::md("$$N_{\\text{Obs.}}$$"), estimate_type = "Prediction Scenario") %>% gt::tab_style(locations = cells_body(rows = str_detect(dataset, "Eucalyptus"), columns = dataset), @@ -1767,24 +1859,55 @@ tbl_data_yi_deviation_model_params %>% ) %>% gt::tab_style(locations = gt::cells_stub(rows = str_detect(dataset, "Eucalyptus")), style = cell_text(style = "italic")) %>% - gt::fmt_scientific(columns = c("R2", "RMSE", "Sigma"), - drop_trailing_zeros = T, - decimals = 2) %>% - gt::fmt_number(columns = gt::contains(c("ICC")), - drop_trailing_zeros = T, - decimals = 3) %>% - gt::fmt_number(columns = gt::contains(c("R2_")), + gt::fmt_number(columns = gt::contains(c("ICC", "RMSE")), + drop_trailing_dec_mark = TRUE, drop_trailing_zeros = T, decimals = 2) %>% + gt::fmt_scientific(columns = c("RMSE"), + rows = abs(RMSE) < 
0.01 | abs(RMSE) > 1000, + drop_trailing_dec_mark = TRUE, + drop_trailing_zeros = T, + decimals = 2) %>% gt::fmt_scientific(columns = c(gt::contains(("R2_marginal"))), - rows = str_detect(model_name, "continuous"), + rows = str_detect(model_name, "continuous|categorical"), drop_trailing_zeros = T, - decimals = 2) %>% gt_fmt_yi("estimate_type") + decimals = 2) %>% + gt::fmt_number(columns = gt::contains(c("R2_conditional")), + drop_trailing_dec_mark = TRUE, + drop_trailing_zeros = T, + decimals = 2) %>% + gt::fmt_number(columns = gt::contains(c("Sigma")), + drop_trailing_dec_mark = TRUE, + drop_trailing_zeros = T, + decimals = 2) %>% + gt::fmt_scientific(columns = c("Sigma"), + rows = abs(Sigma) < 0.01 | abs(Sigma) > 1000, + drop_trailing_zeros = T, + decimals = 2) %>% + gt::fmt_number(columns = "R2", + decimals = 2) %>% + gt::fmt_scientific(columns = c("R2"), + rows = abs(R2) < 0.01 | abs(R2) > 1000, + drop_trailing_zeros = T, + decimals = 2) %>% + gt::fmt_scientific(columns = c("R2_conditional"), + rows = abs(R2_conditional) < 0.01 | abs(R2_conditional) > 1000, + drop_trailing_zeros = T, + decimals = 2) %>% + gt::fmt_scientific(columns = c("R2_marginal"), + rows = abs(R2_marginal) < 0.01 | abs(R2_marginal) > 1000, + drop_trailing_zeros = T, + decimals = 2) %>% + gt::fmt_scientific(columns = c("ICC"), + rows = abs(ICC) < 0.01 | abs(ICC) > 1000, + drop_trailing_zeros = T, + decimals = 2) %>% + gt_fmt_yi("estimate_type") ``` ## Post-hoc analysis: checking the use of model weights in all models explaining deviation from the meta-analytic mean {#sec-post-hoc-weights-analysis} -As we describe in @box-weight-deviation, for models of deviation from the meta-analytic mean effect-size, we had intended to use the inverse variance of the Box-Cox transformed deviation from the meta-analytic mean as model weights. 
Unfortunately using our intended weights specification resulted in invalid transformed response variables for some models whereby extreme outliers were weighted more heavily (two orders of magnitude) than other effect sizes, which caused both issues in the estimated model parameters as well as convergence issues, in particular for models analysing the effect of categorical peer-review rating on deviation from the meta-analytic mean. +As we describe in @nte-box-weight-deviation, for models of deviation from the meta-analytic mean effect-size, we had intended to use the inverse variance of the Box-Cox transformed deviation from the meta-analytic mean as model weights. Unfortunately using our intended weights specification resulted in invalid transformed response variables for some models whereby extreme outliers were weighted more heavily (two orders of magnitude) than other effect sizes, which caused both issues in the estimated model parameters as well as convergence issues, in particular for models analysing the effect of categorical peer-review rating on deviation from the meta-analytic mean. ::: {.callout-note appearance="simple"} # Model Weight Calculation Details @@ -1813,8 +1936,6 @@ Which is executed in the \function{variance_box_cox} function from the the \pack ```{r} #| label: lst-variance-box-cox -#| echo: true -#| code-fold: false #| code-caption: "Function to calculate the variance of the Box-Cox transformed deviation scores." 
#| code-overflow: wrap #| filename: box_cox_transform.R @@ -1827,7 +1948,8 @@ variance_box_cox <- function(folded_mu, folded_v, lambda){ folded_params <- function(abs_dev_score, VZr){ mu <- abs_dev_score sigma <- sqrt(VZr) - fold_mu <- sigma * sqrt(2/pi) * exp((-mu^2)/(2 * sigma^2)) + mu * (1 - 2 * pnorm(-mu/sigma)) # folded abs_dev_score + fold_mu <- sigma * sqrt(2/pi) * exp((-mu^2)/(2 * sigma^2)) + + mu * (1 - 2 * pnorm(-mu/sigma)) # folded abs_dev_score fold_se <- sqrt(mu^2 + sigma^2 - fold_mu^2) fold_v <- fold_se^2 # folded VZr return(list(fold_mu = fold_mu, fold_v = fold_v)) @@ -1840,7 +1962,6 @@ We systematically investigated the impact of using different weighting schemes ( ```{r} #| label: calc-post-hoc-weights-analysis -#| echo: true #| warning: false #| message: false #| code-fold: true @@ -1877,8 +1998,9 @@ prepare_ratings_data <- function(effects_analysis){ } # Create base model formulat -base_formula <- rlang::new_formula(rlang::expr(box_cox_abs_deviation_score_estimate), - rlang::expr(PublishableAsIs)) +base_formula <- rlang::new_formula( + rlang::expr(box_cox_abs_deviation_score_estimate), + rlang::expr(PublishableAsIs)) # Create weight functions calc_inv_bc_var <- rlang::as_function(~ 1/pull(.x, box_cox_var)) calc_inv_folded_v <- rlang::as_function(~ 1/pull(.x, folded_v_val)) @@ -1887,9 +2009,10 @@ no_weights <- NA weight_formulas <- list(no_weights, calc_inv_bc_var, calc_inv_folded_v -) %>% purrr::set_names("no_weights", - "inv_bc_var", - "inv_folded_v") +) %>% + purrr::set_names("no_weights", + "inv_bc_var", + "inv_folded_v") # Create random effect expressions RE_rev <- expr((1 | ReviewerId)) @@ -1951,16 +2074,23 @@ all_models <- convergence = map_lgl(model, performance::check_convergence)) +possibly_estimate_means <- possibly(modelbased::estimate_means, otherwise = NULL) + # Extract Parameter Estimates estimate_means <- all_models %>% filter(singularity == F, convergence == T) %>% reframe(model = set_names(model, model_spec), - results = 
map(model, - possibly(modelbased::estimate_means, otherwise = NULL), - at = "PublishableAsIs"), - results = set_names(results, dataset), dataset = dataset, model_spec = model_spec) %>% + dataset = dataset, + model_spec = model_spec, + weights = case_when(!str_detect(model_spec, "no_weights") ~ "(weights)", + .default = NA)) %>% rowwise() %>% + mutate(weights = modify_if(list(weights), ~ is.na(.x), ~ NULL), + results = list(possibly_estimate_means(model, + by = "PublishableAsIs", weights = weights))) %>% + ungroup() %>% + mutate(results = set_names(results, dataset)) %>% drop_na(results) # model means couldn't be estimated due to convergence issues, drop those models # evaluate and compare performance for remaining models @@ -1997,11 +2127,10 @@ For the blue tit models of deviation influenced by categorical peer-review ratin ```{r} #| label: tbl-weights-analysis-fit-checks #| tbl-cap: "Singularity and convergence checks for all combinations of model weights and random-effects structure in models of the effect of categorical peer rating on deviation from the analytic mean. For some models, mean estimates of parameter levels for peer-review rating were not able to be estimated. 
" -#| echo: false all_models %>% select(dataset, model_spec, singularity, convergence) %>% - left_join(estimate_means %>% select(-model, -results) %>% + left_join(estimate_means %>% select(-model, -results, -weights) %>% mutate(estimate_means = T)) %>% separate(model_spec, into = c("model_spec_weight", "model_spec_random_effect"), sep = "\\.") %>% @@ -2028,7 +2157,8 @@ all_models %>% convergence = "Model converged?", model_spec_random_effect = "Random Effects", estimate_means = "Means Estimable?") %>% - gt::text_transform(fn = function(x) ifelse(x == TRUE, "yes", "no" ), + gt::text_transform(fn = function(x) ifelse(x == TRUE, "yes", + ifelse(x == FALSE, "no", x)), locations = cells_body(columns = c("singularity", "convergence", "estimate_means"))) %>% @@ -2055,8 +2185,9 @@ all_models %>% str_replace("inv_bc_var", "Inverse Box-Cox variance") %>% str_replace("inv_folded_v", "Inverse folded variance") } - ) %>% - gt::as_raw_html() + ) %>% + gt::tab_style(style = cell_text(style = "italic", transform = "capitalize"), + locations = cells_row_groups(groups = "eucalyptus")) ``` To check that the alternative weighting methods generated sensible parameter estimates, we generated marginal effects plots for all models that passed the convergence and singularity checks where marginal effects were estimable for both blue tit [@fig-effects-plots-BT] and *Eucalyptus* datasets [@fig-effects-plots-Euc]. @@ -2066,7 +2197,6 @@ Using the inverse Box-Cox transformed variance for model weights resulted in the ```{r} #| label: fig-effects-plots-Euc #| fig-cap: "Effect plots for each non-singular model that converged with estimable fixed effect group means for the *Eucalyptus* dataset." 
-#| echo: false #| fig-width: 10 #| fig-height: 10 #| fig-pos: page @@ -2089,10 +2219,10 @@ model_means_results %>% patchwork::wrap_plots() + patchwork::plot_annotation(tag_levels = 'A') ``` + ```{r} #| label: fig-effects-plots-BT #| fig-cap: "Effect plots for each non-singular model that converged with estimable fixed effect group means for the blue tit dataset." -#| echo: false modify_plot <- function(p, .y){ p + labs(subtitle = as_label(.y), @@ -2115,6 +2245,7 @@ model_means_results %>% ```{r} #| label: tbl-marginal-means-weights-analysis #| tbl-cap: "Marginal means estimate across weight and random effects specifications for all estimable models for both *Eucalyptus* and blue tit datasets." +#| column: page model_means_results %>% select(dataset, model_spec, results) %>% unnest(results) %>% @@ -2150,16 +2281,28 @@ model_means_results %>% ) %>% gt::cols_label(model_spec_random_effect = "Random Effects", PublishableAsIs = "Peer Rating", - CI_low = gt::md("95\\%CI")) %>% + CI_low = gt::md("95\\%CI"), + SE = gt::md("$\\text{SE}$")) %>% gt::cols_merge(columns = starts_with("CI_"), pattern = "[{1},{2}]") %>% gt::fmt_number(columns = c("Mean", "SE", "CI_low", "CI_high"), decimals = 2) %>% + gt::text_transform( + locations = cells_body( + columns = "model_spec_random_effect", + rows = PublishableAsIs != "deeply flawed and unpublishable" + ), + fn = function(x){ + paste0("") + } + ) %>% gt::tab_style( style = list(gt::cell_text(transform = "capitalize"), gt::cell_text(style = "italic")), locations = gt::cells_row_groups(groups = "eucalyptus") - ) + ) %>% + gt::tab_style(style = cell_text(transform = "capitalize"), + locations = cells_body(columns = "PublishableAsIs")) ``` @@ -2176,7 +2319,6 @@ The performance comparison plots confirmed our suspicions that the inverse Box-C #| fig-subcap: #| - "Blue tit models." #| - "*Eucalyptus* models." 
-#| echo: false #| message: false #| results: 'hide' @@ -2188,7 +2330,6 @@ model_comparison_plots <- "*Eucalyptus*")) %>% pull(results, "dataset") %>% map(plot) - # for printing plot name on figure # model_comparison_plots %>% # map2(.x = ., .y = names(.), ~ .x + ggtitle(.y) #+ @@ -2200,6 +2341,9 @@ model_comparison_plots ```{r} #| label: tbl-model-perf-metrics-weights-analysis #| tbl-cap: "Model performance metric values (non-normalised) for final subset of models considered in weights analysis. All models in final subset included random-effect of Reviewer ID. Metrics included $R^{2}_\\text{Conditional}$, $R^{2}_\\text{Marginal}$, Intra-Class Correlation, Root Mean Squared Error (RMSE), and the weighted AIC, corrected AIC and BIC." +#| message: false +#| warning: false +#| column: page all_models %>% filter(model_spec != "inv_bc_var.RE_study" | dataset != "eucalyptus") %>% #rm nearly unidentifiable model semi_join(estimate_means, @@ -2220,20 +2364,38 @@ all_models %>% R2_marginal = gt::md("$$R^{2}_\\text{Marginal}$$"), Sigma = gt::md("$$\\sigma$$"), AICc = gt::md("$$AIC_c$$"), - AICc_wt = gt::md("$$AIC_c$$ (weight)"), - BIC_wt = gt::md("$$BIC$$ (weight)"), - AIC_wt = gt::md("$$AIC$$ (weight)"), + AICc_wt = gt::md("$$AIC_c$$ (wt)"), + BIC_wt = gt::md("$$BIC$$ (wt)"), + AIC_wt = gt::md("$$AIC$$ (wt)"), AIC = gt::md("$$AIC$$"), BIC = gt::md("$$BIC$$")) %>% - gt::fmt_number(columns = !gt::contains(c("Name", "wt")), - rows = Name != "no_weights.RE_rev", - decimals = 2) %>% - gt::fmt_scientific(columns = c(gt::contains(("R2")), "ICC", "BIC", "AIC", "RMSE", "Sigma", "AICc"), + gt::fmt_number(columns = contains(c("AIC","AICc", "BIC", "R2_", "ICC", "Sigma")), + drop_trailing_zeros = TRUE, + drop_trailing_dec_mark = TRUE, + decimals = 2) %>% + gt::fmt_scientific(columns = "R2_conditional", + rows = R2_conditional < 0.01 | R2_conditional > 1000, + drop_trailing_zeros = T, + decimals = 2) %>% + gt::fmt_scientific(columns = "R2_marginal", + rows = R2_marginal < 0.01 | 
R2_marginal > 1000, + drop_trailing_zeros = T, + decimals = 2) %>% + gt::fmt_scientific(columns = "RMSE", + rows = RMSE < 0.01 | RMSE > 1000, + drop_trailing_zeros = T, + decimals = 2) %>% + gt::fmt_scientific(columns = c("ICC"), + rows = ICC < 0.01 | ICC > 1000, + drop_trailing_zeros = T, + decimals = 2) %>% + gt::fmt_scientific(columns = c("Sigma"), + rows = Sigma < 0.01 | Sigma > 1000, drop_trailing_zeros = T, decimals = 2) %>% gt::fmt_scientific(columns = gt::contains(c("_wt")), rows = Name != "no_weights.RE_rev", - drop_trailing_zeros = T, + drop_trailing_zeros = F, decimals = 2) %>% gt::opt_stylize(style = 6, color = "gray") %>% gt::tab_style( @@ -2247,8 +2409,7 @@ all_models %>% str_replace("no_weights", "None") %>% str_replace("inv_bc_var", "Inverse Box-Cox variance") %>% str_replace("inv_folded_v", "Inverse folded variance") - ) %>% - gt::as_raw_html() + ) ``` diff --git a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-box-cox-transformations-1.png b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-box-cox-transformations-1.png index a93347d..c592feb 100644 Binary files a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-box-cox-transformations-1.png and b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-box-cox-transformations-1.png differ diff --git a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-effects-plots-BT-1.png b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-effects-plots-BT-1.png index 3d4e96b..1c3ec48 100644 Binary files a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-effects-plots-BT-1.png and b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-effects-plots-BT-1.png differ diff --git a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-effects-plots-Euc-1.png b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-effects-plots-Euc-1.png index 88da336..ed885fc 100644 Binary files a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-effects-plots-Euc-1.png and 
b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-effects-plots-Euc-1.png differ diff --git a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-1.png b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-1.png index b9c1796..5a970f8 100644 Binary files a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-1.png and b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-1.png differ diff --git a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-2.png b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-2.png new file mode 100644 index 0000000..41f5dc4 Binary files /dev/null and b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-2.png differ diff --git a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-3.png b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-3.png new file mode 100644 index 0000000..d440ddf Binary files /dev/null and b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-3.png differ diff --git a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-4.png b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-4.png new file mode 100644 index 0000000..70498fb Binary files /dev/null and b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-4.png differ diff --git a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-5.png b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-5.png new file mode 100644 index 0000000..e3ca834 Binary files /dev/null and b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cat-rating-5.png differ diff --git a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cont-rating-1.png 
b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cont-rating-1.png index 9b8f90d..b89e45b 100644 Binary files a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cont-rating-1.png and b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-deviation-cont-rating-1.png differ diff --git a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-euc-deviation-RE-plots-1.png b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-euc-deviation-RE-plots-1.png index ee06fb0..ce3948d 100644 Binary files a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-euc-deviation-RE-plots-1.png and b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-euc-deviation-RE-plots-1.png differ diff --git a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-sorensen-plots-1.png b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-sorensen-plots-1.png index 525ef22..e677fac 100644 Binary files a/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-sorensen-plots-1.png and b/supp_mat/SM3_ExplainingDeviation_files/figure-html/fig-yi-sorensen-plots-1.png differ diff --git a/supp_mat/SM4_case_study_datasets.qmd b/supp_mat/SM4_case_study_datasets.qmd index 886cc2b..c867cb9 100644 --- a/supp_mat/SM4_case_study_datasets.qmd +++ b/supp_mat/SM4_case_study_datasets.qmd @@ -1,12 +1,15 @@ --- title: "Correlation Matrices of Case Study Data" -format: html +format: + html: + code-fold: true + echo: true editor: visual number-sections: true -code-fold: true execute: - freeze: false # re-render only when source changes + freeze: auto # re-render only when source changes toc: false +pre-render: "utils.R" bibliography: - ../ms/references.bib - ../ms/grateful-refs.bib @@ -21,6 +24,7 @@ library(tidyverse) library(ManyEcoEvo) library(GGally) set.seed(1234) +source(here::here("utils.R")) ``` Pairwise-correlation plots for the *Eucalyptus* and blue tit case-study data provided to analysts are shown in @fig-ggpairs-eucalyptus and 
@fig-ggpairs-bt, respectively. Plots were created with R package `GGally` [@GGally]. @@ -35,7 +39,7 @@ Pairwise-correlation plots for the *Eucalyptus* and blue tit case-study data pro #| fig-width: 20 #| fig-height: 20 -ManyEcoEvo::euc_data %>% +euc_data %>% select(where(is_double), -Date, -`Quadrat no`, @@ -57,7 +61,7 @@ ManyEcoEvo::euc_data %>% #| fig-height: 15 #| eval: true -ManyEcoEvo::blue_tit_data %>% +blue_tit_data %>% naniar::replace_with_na_all(condition = ~ .x == ".") %>% mutate(across(c(contains("_ring"), rear_nest_trt, diff --git a/utils.R b/utils.R new file mode 100644 index 0000000..26c9573 --- /dev/null +++ b/utils.R @@ -0,0 +1,11 @@ +# Helper Functions +round_pluck <- function(data, x){pluck(data, x, \(y) round(y, 2))} + +gt_fmt_yi <- function(gt_tbl, columns, ...) { + gt_tbl %>% + gt::fmt(!!columns, + fns = function(x) str_replace(x, "y25", gt::md("$$y_{25}$$")) %>% + str_replace("y50", gt::md("$$y_{50}$$")) %>% + str_replace("y75", gt::md("$$y_{75}$$")), + ...) +}