From fc51dd152cc555b81374cfe96c15372ecd33dd21 Mon Sep 17 00:00:00 2001
From: SamCH93 <samuel.pawel@gmail.com>
Date: Wed, 21 Dec 2022 08:22:40 +0100
Subject: [PATCH] add effect size type (in an ugly way, not finished), use
 natbib cite commands

---
 rsAbsence.Rnw | 43 +++++++++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/rsAbsence.Rnw b/rsAbsence.Rnw
index f3562ea..0764e9c 100755
--- a/rsAbsence.Rnw
+++ b/rsAbsence.Rnw
@@ -178,22 +178,23 @@ were excluded when assessing replication success based on significance.
 % especially true if the original study is under-powered.
 
 
-\textbf{To replicate or not to replicate an original ``null'' finding?}
-Because of the previously presented fallacy, original studies with
-non-significant effects are seldom replicated. Given the cost of replication
-studies, it is also unwise to advise replicating a study that has low changes of
-successful replication. To help deciding what studies are worth repeating,
-efforts to predict which studies have a higher chance to replicate successfully
-emerged \citep{Altmejd2019, Pawel2020}. Of note is that the chance of a successful replication
-intrinsically depends on the definition of replication success. If for a
-successful replication we need a ``significant result in the same direction in
-both the original and the replication study'' (i.e. the two-trials rule, \cite{Senn2008}),
-replicating a non-significant original result does indeed not make any sense.
-However, the use of significance as sole criterion for replication success has
-its shortcomings and other definitions for replication success have been proposed
-\cite{Simonsohn2015, Ly2018, Hedges2019, Held2020}. Additionally, replication
-studies have to be well-design too in order to ensure high enough replication power
-\cite{Anderson2017, Micheloud2020}.
+\textbf{To replicate or not to replicate an original ``null'' finding?} Because
+of the previously presented fallacy, original studies with non-significant
+effects are seldom replicated. Given the cost of replication studies, it is also
+unwise to advise replicating a study that has low chances of successful
+replication. To help decide which studies are worth repeating, efforts have
+emerged to predict which studies have a higher chance of replicating
+successfully \citep{Altmejd2019, Pawel2020}. Note that the chance of a
+successful replication intrinsically depends on the definition of replication
+success. If a successful replication requires a ``significant result in the
+same direction in both the original and the replication study'' \citep[i.e.,
+the two-trials rule,][]{Senn2008}, then replicating a non-significant original
+result indeed does not make sense. However, using significance as the sole
+criterion for replication success has its shortcomings, and other definitions
+of replication success have been proposed \citep{Simonsohn2015, Ly2018,
+  Hedges2019, Held2020}. Additionally, replication studies have to be well
+designed to ensure sufficiently high replication power \citep{Anderson2017,
+  Micheloud2020}.
 
 According to \citet{Anderson2016}, if the goal of a replication is to infer a null effect,
 evidence for the null hypothesis has to be provided. To achieve this, they recommend using
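The strictness of the two-trials rule referenced above can be made concrete with a short calculation (a minimal sketch in R, assuming two independent studies, each tested at two-sided level alpha):

    alpha <- 0.05
    ## under the null, each study is significant in a given direction
    ## with probability alpha/2, independently across studies
    (alpha / 2)^2        # 0.000625 = 1/1600, same pre-specified direction
    2 * (alpha / 2)^2    # 0.00125, if agreement in either direction counts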
@@ -207,9 +208,9 @@ Of the 158 effects presented in 23 original studies that were repeated in the
 RPCB \citep{Errington2021}, 14\% (22) were interpreted as ``null
 effects''.
 % Note that the attempt to replicate all the experiments from the original study
-% was not completed because of some unforeseen issues in the implementation (see
-% \cite{Errington2021b} for more details on the unfinished registered reports in
-% the RPCB).
+% was not completed because of some unforeseen issues in the implementation
+% \citep[see][for more details on the unfinished registered reports in the
+% RPCB]{Errington2021b}.
 Figure~\ref{fig:nullfindings} shows effect estimates with confidence
 intervals for these original ``null findings'' (with $p_{o} > 0.05$) and their
 replication studies from the project.
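For reference, the natbib patterns introduced in this patch behave as sketched below (assuming the document loads \usepackage{natbib} with an author-year style; `key' is a placeholder, and the rendered text depends on the bibliography entries):

    \citep{key}                        % (Author, Year)
    \citet{key}                        % Author (Year)
    \citep[i.e., the two-trials rule,][]{key}
                                       % (i.e., the two-trials rule, Author, Year)
    \citep[see][for more details]{key}
                                       % (see Author, Year, for more details)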
@@ -235,6 +236,7 @@ rpcb <- rpcbRaw %>%
            experiment = Experiment..,
            effect = Effect..,
            internalReplication = Internal.replication..,
+           effectType = Effect.size.type,
            po = Original.p.value,
            smdo = Original.effect.size..SMD.,
            so = Original.standard.error..SMD.,
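The new effectType column can be checked right after the renaming. A minimal sketch with a toy stand-in for rpcbRaw (the effect size type values are made-up placeholders; whether the original chunk uses select() or rename() is not visible in this hunk, but both accept the new = old form):

    library(dplyr)

    ## toy stand-in for rpcbRaw with the raw column names from the chunk above
    rpcbRaw <- data.frame(Effect.size.type = c("Cohen's d", "Cohen's d", "Hazard ratio"),
                          Original.p.value = c(0.64, 0.21, 0.08))

    ## new = old renaming, as in the chunk above
    rpcb <- rename(rpcbRaw,
                   effectType = Effect.size.type,
                   po = Original.p.value)

    ## tabulate which effect size types occur
    count(rpcb, effectType)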
@@ -279,7 +281,7 @@ rpcbNull <- rpcb %>%
 \begin{figure}[!htb]
 << "plot-null-findings-rpcb", fig.height =8.5 >>=
 ggplot(data = rpcbNull) +
-  facet_wrap(~ id, scales = "free", ncol = 4) +
+  facet_wrap(~ id + effectType, scales = "free", ncol = 4) +
   geom_hline(yintercept = 0, lty = 2, alpha = 0.5) +
   geom_pointrange(aes(x = "Original", y = smdo, ymin = smdo - 2*so,
                       ymax = smdo + 2*so)) +
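Faceting by ~ id + effectType pastes both values into each strip label, which is presumably the ``ugly way'' flagged in the commit subject. A possible cleaner variant (a sketch with toy data; all names besides facet_wrap()'s own arguments are assumptions) builds one combined label per panel first:

    library(ggplot2)

    ## toy data standing in for rpcbNull
    d <- data.frame(id = rep(c("Study 1", "Study 2"), each = 2),
                    effectType = rep(c("Cohen's d", "Hazard ratio"), each = 2),
                    study = c("Original", "Replication"),
                    smd = c(0.1, 0.2, -0.1, 0.15), se = 0.2)

    ## one combined strip label instead of faceting by two variables
    d$panel <- paste0(d$id, " (", d$effectType, ")")
    ggplot(d, aes(x = study, y = smd)) +
      facet_wrap(~ panel, scales = "free", ncol = 4) +
      geom_pointrange(aes(ymin = smd - 2*se, ymax = smd + 2*se))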
@@ -444,6 +446,7 @@ cat("\\newpage \\section*{Computational details}")
 @
 
 << "sessionInfo2", echo = Reproducibility, results = Reproducibility >>=
+cat(paste(Sys.time(), Sys.timezone(), "\n"))
 sessionInfo()
 @
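The added timestamp line runs as intended; an equivalent one-liner that attaches the timezone via format() instead of pasting Sys.timezone() and "\n" into the string (a sketch, not part of the patch):

    ## prints e.g. "2022-12-21 08:22:40 CET"
    cat(format(Sys.time(), usetz = TRUE), "\n")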
 
-- 
GitLab