@article{rousselet_beyond_2017,
title = {Beyond differences in means: robust graphical methods to compare two groups in neuroscience},
volume = {46},
issn = {0953-816X},
shorttitle = {Beyond differences in means},
url = {https://onlinelibrary.wiley.com/doi/10.1111/ejn.13610},
doi = {10.1111/ejn.13610},
abstract = {If many changes are necessary to improve the quality of neuroscience research, one relatively simple step could have great payoffs: to promote the adoption of detailed graphical methods, combined with robust inferential statistics. Here, we illustrate how such methods can lead to a much more detailed understanding of group differences than bar graphs and t-tests on means. To complement the neuroscientist’s toolbox, we present two powerful tools that can help us understand how groups of observations differ: the shift function and the difference asymmetry function. These tools can be combined with detailed visualisations to provide complementary perspectives about the data. We provide implementations in R and MATLAB of the graphical tools, and all the examples in the article can be reproduced using R scripts.},
language = {en},
number = {2},
urldate = {2023-01-25},
journal = {European Journal of Neuroscience},
author = {Rousselet, Guillaume A. and Pernet, Cyril R. and Wilcox, Rand R.},
month = jul,
year = {2017},
pages = {1738--1748},
file = {Rousselet et al. - 2017 - Beyond differences in means robust graphical meth.pdf:C\:\\Users\\Gerda Wyssen\\Zotero\\storage\\N65ZN35W\\Rousselet et al. - 2017 - Beyond differences in means robust graphical meth.pdf:application/pdf},
}
@incollection{jaeger_perceptual_2014,
address = {New York, NY},
title = {Perceptual {Decision} {Making}},
isbn = {978-1-4614-7320-6},
url = {https://link.springer.com/10.1007/978-1-4614-7320-6_317-1},
language = {en},
urldate = {2023-02-16},
booktitle = {Encyclopedia of {Computational} {Neuroscience}},
publisher = {Springer New York},
author = {Hauser, Christopher K. and Salinas, Emilio},
editor = {Jaeger, Dieter and Jung, Ranu},
year = {2014},
doi = {10.1007/978-1-4614-7320-6_317-1},
pages = {1--21},
file = {Hauser und Salinas - 2014 - Perceptual Decision Making.pdf:C\:\\Users\\Gerda Wyssen\\Zotero\\storage\\CLCHE4XZ\\Hauser und Salinas - 2014 - Perceptual Decision Making.pdf:application/pdf},
}
@article{mulder_bias_2012,
title = {Bias in the {Brain}: {A} {Diffusion} {Model} {Analysis} of {Prior} {Probability} and {Potential} {Payoff}},
volume = {32},
issn = {0270-6474, 1529-2401},
shorttitle = {Bias in the {Brain}},
url = {https://www.jneurosci.org/lookup/doi/10.1523/JNEUROSCI.4156-11.2012},
doi = {10.1523/JNEUROSCI.4156-11.2012},
language = {en},
number = {7},
urldate = {2023-02-16},
journal = {Journal of Neuroscience},
author = {Mulder, M. J. and Wagenmakers, E.-J. and Ratcliff, R. and Boekel, W. and Forstmann, B. U.},
month = feb,
year = {2012},
pages = {2335--2343},
file = {Mulder et al. - 2012 - Bias in the Brain A Diffusion Model Analysis of P.pdf:C\:\\Users\\Gerda Wyssen\\Zotero\\storage\\MKH9R5V6\\Mulder et al. - 2012 - Bias in the Brain A Diffusion Model Analysis of P.pdf:application/pdf},
}
@article{davis_team_nodate,
title = {The {Team} {Software} {Process}{\textsuperscript{SM}} ({TSP}{\textsuperscript{SM}}) in {Practice}: {A} {Summary} of {Recent} {Results}},
language = {en},
author = {Davis, Noopur and Mullaney, Julia},
file = {Davis und Mullaney - The Team Software ProcessSM (TSPSM) in Practice A.pdf:C\:\\Users\\Gerda Wyssen\\Zotero\\storage\\DP64H2R8\\Davis und Mullaney - The Team Software ProcessSM (TSPSM) in Practice A.pdf:application/pdf},
}
@article{open_science_collaboration_estimating_2015,
title = {Estimating the reproducibility of psychological science},
volume = {349},
issn = {0036-8075, 1095-9203},
url = {https://www.science.org/doi/10.1126/science.aac4716},
doi = {10.1126/science.aac4716},
abstract = {Empirically analyzing empirical evidence: One of the central goals in any scientific endeavor is to understand causality. Experiments that seek to demonstrate a cause/effect relation most often manipulate the postulated causal factor. Aarts et al. describe the replication of 100 experiments reported in papers published in 2008 in three high-ranking psychology journals. Assessing whether the replication and the original experiment yielded the same result according to several criteria, they find that about one-third to one-half of the original findings were also observed in the replication study. A large-scale assessment suggests that experimental reproducibility in psychology leaves a lot to be desired.
INTRODUCTION: Reproducibility is a defining feature of science, but the extent to which it characterizes current research is unknown. Scientific claims should not gain credence because of the status or authority of their originator but by the replicability of their supporting evidence. Even research of exemplary quality may have irreproducible empirical findings because of random or systematic error.
RATIONALE: There is concern about the rate and predictors of reproducibility, but limited evidence. Potentially problematic practices include selective reporting, selective analysis, and insufficient specification of the conditions necessary or sufficient to obtain the results. Direct replication is the attempt to recreate the conditions believed sufficient for obtaining a previously observed finding and is the means of establishing reproducibility of a finding with new data. We conducted a large-scale, collaborative effort to obtain an initial estimate of the reproducibility of psychological science.
RESULTS: We conducted replications of 100 experimental and correlational studies published in three psychology journals using high-powered designs and original materials when available. There is no single standard for evaluating replication success. Here, we evaluated reproducibility using significance and $P$ values, effect sizes, subjective assessments of replication teams, and meta-analysis of effect sizes. The mean effect size (r) of the replication effects ($M_r$ = 0.197, SD = 0.257) was half the magnitude of the mean effect size of the original effects ($M_r$ = 0.403, SD = 0.188), representing a substantial decline. Ninety-seven percent of original studies had significant results ($P$ {\textless} .05). Thirty-six percent of replications had significant results; 47\% of original effect sizes were in the 95\% confidence interval of the replication effect size; 39\% of effects were subjectively rated to have replicated the original result; and if no bias in original results is assumed, combining original and replication results left 68\% with statistically significant effects. Correlational tests suggest that replication success was better predicted by the strength of original evidence than by characteristics of the original and replication teams.
CONCLUSION: No single indicator sufficiently describes replication success, and the five indicators examined here are not the only ways to evaluate reproducibility. Nonetheless, collectively these results offer a clear conclusion: A large portion of replications produced weaker evidence for the original findings despite using materials provided by the original authors, review in advance for methodological fidelity, and high statistical power to detect the original effect sizes. Moreover, correlational evidence is consistent with the conclusion that variation in the strength of initial evidence (such as original $P$ value) was more predictive of replication success than variation in the characteristics of the teams conducting the research (such as experience and expertise). The latter factors certainly can influence replication success, but they did not appear to do so here. Reproducibility is not well understood because the incentives for individual scientists prioritize novelty over replication. Innovation is the engine of discovery and is vital for a productive, effective scientific enterprise. However, innovative ideas become old news fast. Journal reviewers and editors may dismiss a new test of a published idea as unoriginal. The claim that “we already know this” belies the uncertainty of scientific evidence. Innovation points out paths that are possible; replication points out paths that are likely; progress relies on both. Replication can increase certainty when findings are reproduced and promote innovation when they are not. This project provides accumulating evidence for many findings in psychological research and suggests that there is still more work to do to verify whether we know what we think we know.
Reproducibility is a defining feature of science, but the extent to which it characterizes current research is unknown. We conducted replications of 100 experimental and correlational studies published in three psychology journals using high-powered designs and original materials when available. Replication effects were half the magnitude of original effects, representing a substantial decline. Ninety-seven percent of original studies had statistically significant results. Thirty-six percent of replications had statistically significant results; 47\% of original effect sizes were in the 95\% confidence interval of the replication effect size; 39\% of effects were subjectively rated to have replicated the original result; and if no bias in original results is assumed, combining original and replication results left 68\% with statistically significant effects. Correlational tests suggest that replication success was better predicted by the strength of original evidence than by characteristics of the original and replication teams.},
language = {en},
number = {6251},
urldate = {2023-03-19},
journal = {Science},
author = {{Open Science Collaboration}},
month = aug,
year = {2015},
pages = {aac4716},
file = {Open Science Collaboration - 2015 - Estimating the reproducibility of psychological sc.pdf:C\:\\Users\\Gerda Wyssen\\Zotero\\storage\\EQEQXCN2\\Open Science Collaboration - 2015 - Estimating the reproducibility of psychological sc.pdf:application/pdf},
}
@article{nosek_replicability_2022,
title = {Replicability, {Robustness}, and {Reproducibility} in {Psychological} {Science}},
volume = {73},
doi = {10.1146/annurev-psych-020821-114157},
abstract = {Replication—an important, uncommon, and misunderstood practice—is gaining appreciation in psychology. Achieving replicability is important for making research progress. If findings are not replicable, then prediction and theory development are stifled. If findings are replicable, then interrogation of their meaning and validity can advance knowledge. Assessing replicability can be productive for generating and testing hypotheses by actively confronting current understandings to identify weaknesses and spur innovation. For psychology, the 2010s might be characterized as a decade of active confrontation. Systematic and multi-site replication projects assessed current understandings and observed surprising failures to replicate many published findings. Replication efforts highlighted sociocultural challenges such as disincentives to conduct replications and a tendency to frame replication as a personal attack rather than a healthy scientific practice, and they raised awareness that replication contributes to self-correction. Nevertheless, innovation in doing and understanding replication and its cousins, reproducibility and robustness, has positioned psychology to improve research practices and accelerate progress.},
language = {en},
journal = {Annual Review of Psychology},
author = {Nosek, Brian A. and Hardwicke, Tom E. and Moshontz, Hannah and Allard, Aurélien and Corker, Katherine S. and Dreber, Anna and Fidler, Fiona and Hilgard, Joe and Struhl, Melissa Kline and Nuijten, Michèle B. and Rohrer, Julia M. and Romero, Felipe and Scheel, Anne M. and Scherer, Laura D. and Schönbrodt, Felix D. and Vazire, Simine},
year = {2022},
pages = {719--748},
file = {Nosek et al. - Replicability, Robustness, and Reproducibility in .pdf:C\:\\Users\\Gerda Wyssen\\Zotero\\storage\\QHMCLFAQ\\Nosek et al. - Replicability, Robustness, and Reproducibility in .pdf:application/pdf},
}
@article{plesser_reproducibility_2018,
title = {Reproducibility vs. {Replicability}: {A} {Brief} {History} of a {Confused} {Terminology}},
volume = {11},
issn = {1662-5196},
shorttitle = {Reproducibility vs. {Replicability}},
url = {http://journal.frontiersin.org/article/10.3389/fninf.2017.00076/full},
doi = {10.3389/fninf.2017.00076},
language = {en},
urldate = {2023-03-20},
journal = {Frontiers in Neuroinformatics},
author = {Plesser, Hans E.},
month = jan,
year = {2018},
pages = {76},
file = {Plesser - 2018 - Reproducibility vs. Replicability A Brief History.pdf:C\:\\Users\\Gerda Wyssen\\Zotero\\storage\\QF3I7WA5\\Plesser - 2018 - Reproducibility vs. Replicability A Brief History.pdf:application/pdf},
}
@techreport{cacioppo_social_2015,
title = {Social, {Behavioral}, and {Economic} {Sciences} {Perspectives} on {Robust} and {Reliable} {Science}},
type = {Report of the {Subcommittee} on {Replicability} in {Science}},
institution = {Advisory Committee to the National Science Foundation Directorate for Social, Behavioral, and Economic Sciences},
author = {Cacioppo, J.T. and Kaplan, R.M. and Krosnick, J.A. and Olds, J.L. and Dean, H.},
year = {2015},
file = {SBE_Robust_and_Reliable_Research_Report.pdf:C\:\\Users\\Gerda Wyssen\\Zotero\\storage\\PTY64D38\\SBE_Robust_and_Reliable_Research_Report.pdf:application/pdf},
}
@article{wilkinson_fair_2016,
title = {The {FAIR} {Guiding} {Principles} for scientific data management and stewardship},
volume = {3},
issn = {2052-4463},
url = {https://www.nature.com/articles/sdata201618},
doi = {10.1038/sdata.2016.18},
abstract = {There is an urgent need to improve the infrastructure supporting the reuse of scholarly data. A diverse set of stakeholders—representing academia, industry, funding agencies, and scholarly publishers—have come together to design and jointly endorse a concise and measurable set of principles that we refer to as the FAIR Data Principles. The intent is that these may act as a guideline for those wishing to enhance the reusability of their data holdings. Distinct from peer initiatives that focus on the human scholar, the FAIR Principles put specific emphasis on enhancing the ability of machines to automatically find and use the data, in addition to supporting its reuse by individuals. This Comment is the first formal publication of the FAIR Principles, and includes the rationale behind them, and some exemplar implementations in the community.},
language = {en},
number = {1},
urldate = {2023-03-20},
journal = {Scientific Data},
author = {Wilkinson, Mark D. and Dumontier, Michel and Aalbersberg, IJsbrand Jan and Appleton, Gabrielle and Axton, Myles and Baak, Arie and Blomberg, Niklas and Boiten, Jan-Willem and da Silva Santos, Luiz Bonino and Bourne, Philip E. and Bouwman, Jildau and Brookes, Anthony J. and Clark, Tim and Crosas, Mercè and Dillo, Ingrid and Dumon, Olivier and Edmunds, Scott and Evelo, Chris T. and Finkers, Richard and Gonzalez-Beltran, Alejandra and Gray, Alasdair J.G. and Groth, Paul and Goble, Carole and Grethe, Jeffrey S. and Heringa, Jaap and ’t Hoen, Peter A.C and Hooft, Rob and Kuhn, Tobias and Kok, Ruben and Kok, Joost and Lusher, Scott J. and Martone, Maryann E. and Mons, Albert and Packer, Abel L. and Persson, Bengt and Rocca-Serra, Philippe and Roos, Marco and van Schaik, Rene and Sansone, Susanna-Assunta and Schultes, Erik and Sengstag, Thierry and Slater, Ted and Strawn, George and Swertz, Morris A. and Thompson, Mark and van der Lei, Johan and van Mulligen, Erik and Velterop, Jan and Waagmeester, Andra and Wittenburg, Peter and Wolstencroft, Katherine and Zhao, Jun and Mons, Barend},
month = mar,
year = {2016},
pages = {160018},
file = {Wilkinson et al. - 2016 - The FAIR Guiding Principles for scientific data ma.pdf:C\:\\Users\\Gerda Wyssen\\Zotero\\storage\\D9LNGMVG\\Wilkinson et al. - 2016 - The FAIR Guiding Principles for scientific data ma.pdf:application/pdf},
}
@article{goodman_what_2016,
title = {What does research reproducibility mean?},
volume = {8},
number = {341},
doi = {10.1126/scitranslmed.aaf5027},
abstract = {The language and conceptual framework of ``research reproducibility'' are nonstandard and unsettled across the sciences. In this Perspective, we review an array of explicit and implicit definitions of reproducibility and related terminology, and discuss how to avoid potential misunderstandings when these terms are used as a surrogate for ``truth''.},
journal = {Science Translational Medicine},
author = {Goodman, Steven N. and Fanelli, Daniele and Ioannidis, John P. A.},
year = {2016},
}
@article{ochsner_rethinking_2002,
title = {Rethinking {Feelings}: {An} {fMRI} {Study} of the {Cognitive} {Regulation} of {Emotion}},
volume = {14},
issn = {0898-929X, 1530-8898},
shorttitle = {Rethinking {Feelings}},
url = {https://direct.mit.edu/jocn/article/14/8/1215/3708/Rethinking-Feelings-An-fMRI-Study-of-the-Cognitive},
doi = {10.1162/089892902760807212},
abstract = {The ability to cognitively regulate emotional responses to aversive events is important for mental and physical health. Little is known, however, about neural bases of the cognitive control of emotion. The present study employed functional magnetic resonance imaging to examine the neural systems used to reappraise highly negative scenes in unemotional terms. Reappraisal of highly negative scenes reduced subjective experience of negative affect. Neural correlates of reappraisal were increased activation of the lateral and medial prefrontal regions and decreased activation of the amygdala and medial orbito-frontal cortex. These findings support the hypothesis that prefrontal cortex is involved in constructing reappraisal strategies that can modulate activity in multiple emotion-processing systems.},
language = {en},
number = {8},
urldate = {2023-04-02},
journal = {Journal of Cognitive Neuroscience},
author = {Ochsner, Kevin N. and Bunge, Silvia A. and Gross, James J. and Gabrieli, John D. E.},
month = nov,
year = {2002},
pages = {1215--1229},
file = {Ochsner et al. - 2002 - Rethinking Feelings An fMRI Study of the Cognitiv.pdf:C\:\\Users\\Gerda Wyssen\\Zotero\\storage\\EKQQQUFH\\Ochsner et al. - 2002 - Rethinking Feelings An fMRI Study of the Cognitiv.pdf:application/pdf},
}
@inproceedings{matejka_same_2017,
address = {Denver Colorado USA},
title = {Same {Stats}, {Different} {Graphs}: {Generating} {Datasets} with {Varied} {Appearance} and {Identical} {Statistics} through {Simulated} {Annealing}},
isbn = {978-1-4503-4655-9},
shorttitle = {Same {Stats}, {Different} {Graphs}},
url = {https://dl.acm.org/doi/10.1145/3025453.3025912},
doi = {10.1145/3025453.3025912},
abstract = {Datasets which are identical over a number of statistical properties, yet produce dissimilar graphs, are frequently used to illustrate the importance of graphical representations when exploring data. This paper presents a novel method for generating such datasets, along with several examples. Our technique varies from previous approaches in that new datasets are iteratively generated from a seed dataset through random perturbations of individual data points, and can be directed towards a desired outcome through a simulated annealing optimization strategy. Our method has the benefit of being agnostic to the particular statistical properties that are to remain constant between the datasets, and allows for control over the graphical appearance of resulting output.},
language = {en},
urldate = {2023-04-14},
booktitle = {Proceedings of the 2017 {CHI} {Conference} on {Human} {Factors} in {Computing} {Systems}},
publisher = {ACM},
author = {Matejka, Justin and Fitzmaurice, George},
month = may,
year = {2017},
pages = {1290--1294},
file = {Matejka und Fitzmaurice - 2017 - Same Stats, Different Graphs Generating Datasets .pdf:C\:\\Users\\Gerda Wyssen\\Zotero\\storage\\CKC2VDZ8\\Matejka und Fitzmaurice - 2017 - Same Stats, Different Graphs Generating Datasets .pdf:application/pdf},
}