% references.bib
% NOTE(review): removed GitHub web-export residue (page header and scraped
% line-number gutter) that preceded the entries; it was not part of the
% bibliography data.
@article{Cartwright2011-jb,
  title     = {A philosopher's view of the long road from {RCTs} to
               effectiveness},
  author    = {Cartwright, Nancy},
  journal   = {Lancet},
  publisher = {Elsevier BV},
  volume    = {377},
  number    = {9775},
  pages     = {1400--1401},
  month     = apr,
  year      = {2011},
  language  = {en},
}
@article{Faul2007-le,
  title     = {{G*Power} 3: a flexible statistical power analysis program for
               the social, behavioral, and biomedical sciences},
  author    = {Faul, Franz and Erdfelder, Edgar and Lang, Albert-Georg and
               Buchner, Axel},
  abstract  = {G*Power (Erdfelder, Faul, \& Buchner, 1996) was designed as a
               general stand-alone power analysis program for statistical tests
               commonly used in social and behavioral research. G*Power 3 is a
               major extension of, and improvement over, the previous versions.
               It runs on widely used computer platforms (i.e., Windows XP,
               Windows Vista, and Mac OS X 10.4) and covers many different
               statistical tests of the t, F, and chi2 test families. In
               addition, it includes power analyses for z tests and some exact
               tests. G*Power 3 provides improved effect size calculators and
               graphic options, supports both distribution-based and
               design-based input modes, and offers all types of power analyses
               in which users might be interested. Like its predecessors,
               G*Power 3 is free.},
  journal   = {Behav. Res. Methods},
  publisher = {Springer Science and Business Media LLC},
  volume    = {39},
  number    = {2},
  pages     = {175--191},
  month     = may,
  year      = {2007},
  language  = {en},
}
@article{Garcia-Sifuentes2021-pa,
  title     = {Reporting and misreporting of sex differences in the biological
               sciences},
  author    = {Garcia-Sifuentes, Yesenia and Maney, Donna L},
  abstract  = {As part of an initiative to improve rigor and reproducibility in
               biomedical research, the U.S. National Institutes of Health now
               requires the consideration of sex as a biological variable in
               preclinical studies. This new policy has been interpreted by
               some as a call to compare males and females with each other.
               Researchers testing for sex differences may not be trained to do
               so, however, increasing risk for misinterpretation of results.
               Using a list of recently published articles curated by Woitowich
               et al. (eLife, 2020; 9:e56344), we examined reports of sex
               differences and non-differences across nine biological
               disciplines. Sex differences were claimed in the majority of the
               147 articles we analyzed; however, statistical evidence
               supporting those differences was often missing. For example,
               when a sex-specific effect of a manipulation was claimed,
               authors usually had not tested statistically whether females and
               males responded differently. Thus, sex-specific effects may be
               over-reported. In contrast, we also encountered practices that
               could mask sex differences, such as pooling the sexes without
               first testing for a difference. Our findings support the need
               for continuing efforts to train researchers how to test for and
               report sex differences in order to promote rigor and
               reproducibility in biomedical research.},
  journal   = {{eLife}},
  publisher = {eLife Sciences Publications, Ltd},
  volume    = {10},
  month     = nov,
  year      = {2021},
  keywords  = {medicine; meta-research; neuroscience; sex differences;
               sex inclusion},
  copyright = {http://creativecommons.org/licenses/by/4.0/},
  language  = {en},
}
@article{Karp2021-lc,
  title     = {What is the optimum design for my animal experiment?},
  author    = {Karp, Natasha A and Fry, Derek},
  abstract  = {Within preclinical research, attention has focused on
               experimental design and how current practices can lead to poor
               reproducibility. There are numerous decision points when
               designing experiments. Ethically, when working with animals we
               need to conduct a harm-benefit analysis to ensure the animal use
               is justified for the scientific gain. Experiments should be
               robust, not use more or fewer animals than necessary, and truly
               add to the knowledge base of science. Using case studies to
               explore these decision points, we consider how individual
               experiments can be designed in several different ways. We use
               the Experimental Design Assistant (EDA) graphical summary of
               each experiment to visualise the design differences and then
               consider the strengths and weaknesses of each design. Through
               this format, we explore key and topical experimental design
               issues such as pseudo-replication, blocking, covariates, sex
               bias, inference space, standardisation fallacy and factorial
               designs. There are numerous articles discussing these critical
               issues in the literature, but here we bring together these
               topics and explore them using real-world examples allowing the
               implications of the choice of design to be considered.
               Fundamentally, there is no perfect experiment; choices must be
               made which will have an impact on the conclusions that can be
               drawn. We need to understand the limitations of an experiment's
               design and when we report the experiments, we need to share the
               caveats that inherently exist.},
  journal   = {BMJ Open Sci.},
  publisher = {Portico},
  volume    = {5},
  number    = {1},
  pages     = {e100126},
  month     = mar,
  year      = {2021},
  keywords  = {animal; biostatistics; disease models; models; research design},
  language  = {en},
}
@article{Kass1995-hs,
  title     = {Bayes factors},
  author    = {Kass, Robert E and Raftery, Adrian E},
  journal   = {J. Am. Stat. Assoc.},
  publisher = {Informa UK Limited},
  volume    = {90},
  number    = {430},
  pages     = {773--795},
  month     = jun,
  year      = {1995},
  language  = {en},
}
@article{knuth84,
  author     = {Knuth, Donald E.},
  title      = {Literate {Programming}},
  year       = {1984},
  issue_date = {May 1984},
  publisher  = {Oxford University Press, Inc.},
  address    = {USA},
  volume     = {27},
  number     = {2},
  issn       = {0010-4620},
  url        = {https://doi.org/10.1093/comjnl/27.2.97},
  doi        = {10.1093/comjnl/27.2.97},
  journal    = {Comput. J.},
  month      = may,
  pages      = {97--111},
  numpages   = {15},
}
@article{Mayr2007-ev,
  title     = {A short tutorial of {GPower}},
  author    = {Mayr, Susanne and Erdfelder, Edgar and Buchner, Axel and Faul,
               Franz},
  journal   = {Tutor. Quant. Methods Psychol.},
  publisher = {The Quantitative Methods for Psychology},
  volume    = {3},
  number    = {2},
  pages     = {51--59},
  month     = sep,
  year      = {2007},
}
@article{Nieuwenhuis2011-nl,
  title     = {Erroneous analyses of interactions in neuroscience: a problem of
               significance},
  author    = {Nieuwenhuis, Sander and Forstmann, Birte U and Wagenmakers,
               Eric-Jan},
  abstract  = {In theory, a comparison of two experimental effects requires a
               statistical test on their difference. In practice, this
               comparison is often based on an incorrect procedure involving
               two separate tests in which researchers conclude that effects
               differ when one effect is significant (P $<$ 0.05). We reviewed
               513 behavioral, systems and cognitive neuroscience articles in
               five top-ranking journals (Science, Nature, Nature Neuroscience,
               Neuron and The Journal of Neuroscience) and found that 78 used
               the correct procedure and 79 used the incorrect procedure. An
               additional analysis suggests that incorrect analyses of
               interactions are even more common in cellular and molecular
               neuroscience. We discuss scenarios in which the erroneous
               procedure is particularly beguiling.},
  journal   = {Nat. Neurosci.},
  publisher = {Springer Science and Business Media LLC},
  volume    = {14},
  number    = {9},
  pages     = {1105--1107},
  month     = aug,
  year      = {2011},
  language  = {en},
}
@article{Vorland2021-wx,
  title     = {Sex difference analyses under scrutiny},
  author    = {Vorland, Colby J},
  abstract  = {A survey reveals that many researchers do not use appropriate
               statistical analyses to evaluate sex differences in biomedical
               research.},
  journal   = {{eLife}},
  publisher = {eLife Sciences Publications, Ltd},
  volume    = {10},
  month     = nov,
  year      = {2021},
  keywords  = {medicine; meta-research; methodological weakness; neuroscience;
               sex differences; sex inclusion; statistics; study design},
  copyright = {http://creativecommons.org/licenses/by/4.0/},
  language  = {en},
}