
Consider the following test data set using R:

testdat<-data.frame("id"=c(rep(1,5),rep(2,5),rep(3,5)),
                    "period"=rep(seq(1:5),3),
                    "treat"=c(c(0,1,1,1,0),c(0,0,1,1,1),c(0,0,1,1,1)),
                    "state"=c(rep(0,5),c(0,1,1,1,1),c(0,0,0,1,1)),
                    "int"=c(rep(0,13),1,1))
testdat
   id period treat state int
1   1      1     0     0   0
2   1      2     1     0   0
3   1      3     1     0   0
4   1      4     1     0   0
5   1      5     0     0   0
6   2      1     0     0   0
7   2      2     0     1   0
8   2      3     1     1   0
9   2      4     1     1   0
10  2      5     1     1   0
11  3      1     0     0   0
12  3      2     0     0   0
13  3      3     1     0   0
14  3      4     1     1   1
15  3      5     1     1   1

The first 4 variables are what I have; int is the variable I want to create. It is similar to an interaction between treat and state, but that would put 1s in rows 8-10, which is not desired. Essentially, I only want the interaction to be 1 when state changes during a treat spell, but not otherwise. Any thoughts on how to create this (especially at scale, for a dataset with a million observations)?

Edit: to clarify why I want this measure, I want to run something like the following regression:

lm(outcome~treat+state+I(treat*state))

But I'm really interested in the interaction only when treat straddles a change in state. In the regression above, I(treat*state) pools the effect I'm interested in with the effect of spells where treat is 1 entirely while state is 1. In theory, these should have two different effects, so I need to disaggregate them. I hope this makes sense, and I am happy to provide additional details.
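For instance, once int exists, the pooled interaction can be split into the straddling part and the remainder. A minimal sketch, assuming a hypothetical outcome column (not part of testdat above):

# hypothetical outcome and the non-straddling remainder of treat*state,
# built on a copy so testdat itself stays unchanged
dat <- transform(testdat,
                 outcome = rnorm(nrow(testdat)),   # placeholder, illustration only
                 other   = treat * state * (1 - int))
lm(outcome ~ treat + state + int + other, data = dat)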

dkro

3 Answers


I'm sure this is possible in base R, but here's a tidyverse version:

library(dplyr)
testdat %>%
  group_by(grp = cumsum(c(FALSE, diff(treat) > 0))) %>%
  mutate(int2 = +(state > 0 & first(state) == 0 & treat > 0)) %>%
  ungroup() %>%
  select(-grp)
# # A tibble: 15 x 6
#       id period treat state   int  int2
#    <dbl>  <int> <dbl> <dbl> <dbl> <int>
#  1     1      1     0     0     0     0
#  2     1      2     1     0     0     0
#  3     1      3     1     0     0     0
#  4     1      4     1     0     0     0
#  5     1      5     0     0     0     0
#  6     2      1     0     0     0     0
#  7     2      2     0     1     0     0
#  8     2      3     1     1     0     0
#  9     2      4     1     1     0     0
# 10     2      5     1     1     0     0
# 11     3      1     0     0     0     0
# 12     3      2     0     0     0     0
# 13     3      3     1     0     0     0
# 14     3      4     1     1     1     1
# 15     3      5     1     1     1     1
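The grouping expression cumsum(c(FALSE, diff(treat) > 0)) increments only when treat flips from 0 to 1, so each group is a run of treat == 1 together with the trailing treat == 0 rows; for the example data:

with(testdat, cumsum(c(FALSE, diff(treat) > 0)))
#  [1] 0 1 1 1 1 1 1 2 2 2 2 2 3 3 3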

Alternative logic for the grouping uses run-length encoding, effectively the same (suggested by https://stackoverflow.com/a/35313426):

testdat %>%
  group_by(grp = { yy <- rle(treat); rep(seq_along(yy$lengths), yy$lengths); }) %>%
  # ...

And as in that answer, I wish dplyr had an equivalent to data.table's rleid: the idea is to group by runs of consecutive identical values in a column, rather than by each distinct value across all rows. If you look at this mid-pipe (before cleaning up grp), you'd see

testdat %>%
  group_by(grp = { yy <- rle(treat); rep(seq_along(yy$lengths), yy$lengths); }) %>%
  mutate(int2 = +(state > 0 & first(state) == 0 & treat > 0)) %>%
  ungroup()
# # A tibble: 15 x 7
#       id period treat state   int   grp  int2
#    <dbl>  <int> <dbl> <dbl> <dbl> <int> <int>
#  1     1      1     0     0     0     1     0
#  2     1      2     1     0     0     2     0
#  3     1      3     1     0     0     2     0
#  4     1      4     1     0     0     2     0
#  5     1      5     0     0     0     3     0
#  6     2      1     0     0     0     3     0
#  7     2      2     0     1     0     3     0
#  8     2      3     1     1     0     4     0
#  9     2      4     1     1     0     4     0
# 10     2      5     1     1     0     4     0
# 11     3      1     0     0     0     5     0
# 12     3      2     0     0     0     5     0
# 13     3      3     1     0     0     6     0
# 14     3      4     1     1     1     6     1
# 15     3      5     1     1     1     6     1

But that's just wishful thinking. I guess I could also do

my_rleid <- function(x) { yy <- rle(x); rep(seq_along(yy$lengths), yy$lengths); }
testdat %>%
  group_by(grp = my_rleid(treat)) %>%
  # ...
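For comparison, a sketch of the same logic in data.table itself, where rleid does the run grouping directly:

library(data.table)
# rleid(treat) assigns one group id per run of consecutive equal values
as.data.table(testdat)[, int2 := +(state > 0 & first(state) == 0 & treat > 0),
                       by = rleid(treat)][]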
r2evans

Here is a base R way using rle and ave.

# encode treat as runs, then make each run's value unique so that
# inverse.rle(r) yields a distinct group id per run
r <- rle(testdat$treat)
r$values <- cumsum(r$values) + seq_along(r$values)
# within each run of treat, flag rows whose state differs from the run's
# first state; keep the flag only where treat is 1
int2 <- +(ave(testdat$state, inverse.rle(r), FUN = function(x) x != x[1]) & testdat$treat == 1)
testdat <- cbind(testdat, int2)

testdat
#   id period treat state int int2
#1   1      1     0     0   0    0
#2   1      2     1     0   0    0
#3   1      3     1     0   0    0
#4   1      4     1     0   0    0
#5   1      5     0     0   0    0
#6   2      1     0     0   0    0
#7   2      2     0     1   0    0
#8   2      3     1     1   0    0
#9   2      4     1     1   0    0
#10  2      5     1     1   0    0
#11  3      1     0     0   0    0
#12  3      2     0     0   0    0
#13  3      3     1     0   0    0
#14  3      4     1     1   1    1
#15  3      5     1     1   1    1
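To see why the cumsum(r$values) + seq_along(r$values) step is needed: it makes every run's value unique, so inverse.rle(r) returns a distinct group id per run instead of the original 0/1 values:

r <- rle(testdat$treat)
r$values
#[1] 0 1 0 1 0 1
r$values <- cumsum(r$values) + seq_along(r$values)
inverse.rle(r)
# [1] 1 3 3 3 4 4 4 6 6 6 7 7 9 9 9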

Timings

Since the question mentions performance as an issue (the real use case data set has 1 million rows), here are timings of my solution and the one by r2evans.

Write both solutions as functions.

library(dplyr)

f1 <- function(X){
  r <- rle(X$treat)
  r$values <- cumsum(r$values) + seq_along(r$values)
  int2 <- +(ave(X$state, inverse.rle(r), FUN = function(x) x != x[1]) & X$treat == 1)
  cbind(X, int2)
}

f2 <- function(X){
  X %>%
    group_by(grp = cumsum(c(FALSE, diff(treat) > 0))) %>%
    mutate(int2 = +(state > 0 & first(state) == 0 & treat > 0)) %>%
    ungroup() %>%
    select(-grp)
}

Next, determine how many doublings of testdat are needed to approach 1 million rows.

log2(1e6/nrow(testdat))
#[1] 16.02468

df1 <- testdat
for(i in 1:15) df1 <- rbind(df1, df1)
nrow(df1)
#[1] 491520

That is about half a million rows, which should be enough for a test.

mb <- microbenchmark::microbenchmark(
  base = f1(df1),
  dplyr = f2(df1),
  times = 10
)

rm(df1)    # tidy up
print(mb, unit = "relative", order = "median")
#Unit: relative
#  expr      min       lq     mean   median       uq      max neval
#  base 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000    10
# dplyr 1.283237 1.359772 1.331494 1.369062 1.316815 1.256968    10

The base R solution is about 37% faster at the median (the dplyr version's median time is 1.37 times the base time).

Rui Barradas

Another base R version, also using ave.

testdat$treat & c(0, diff(testdat$state))==1 evaluates to TRUE where state changes from 0 to 1 while treat is 1. testdat$treat & testdat$state evaluates to TRUE where both are 1.
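For the example data, the two building blocks evaluate to (values follow from the treat and state columns above):

+(testdat$treat & c(0, diff(testdat$state)) == 1)   # state flips 0 -> 1 under treat
# [1] 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
+(testdat$treat & testdat$state)                    # both are 1
# [1] 0 0 0 0 0 0 0 1 1 1 0 0 0 1 1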

# group by runs where treat & state keeps the same value, then propagate the
# first row's "state flipped 0 -> 1 under treat" flag through each run
testdat$int2 <- +ave(testdat$treat & c(0, diff(testdat$state)) == 1,
  cumsum(c(0, abs(diff(testdat$treat & testdat$state)))),
  FUN = function(x) rep(x[1], length(x)))
testdat
#   id period treat state int int2
#1   1      1     0     0   0    0
#2   1      2     1     0   0    0
#3   1      3     1     0   0    0
#4   1      4     1     0   0    0
#5   1      5     0     0   0    0
#6   2      1     0     0   0    0
#7   2      2     0     1   0    0
#8   2      3     1     1   0    0
#9   2      4     1     1   0    0
#10  2      5     1     1   0    0
#11  3      1     0     0   0    0
#12  3      2     0     0   0    0
#13  3      3     1     0   0    0
#14  3      4     1     1   1    1
#15  3      5     1     1   1    1

Or using Reduce:

testdat$int2 <- Reduce(function(x, y) if (y == -1) 0 else if (x == 1 || y == 1) 1 else 0,
                       (testdat$treat & c(0, diff(testdat$state)) == 1) -
                         c(0, diff(testdat$treat & testdat$state) == -1),
                       accumulate = TRUE)
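The vector passed to Reduce is +1 where a straddle starts and -1 where a run of treat & state ends; the accumulator then carries a 1 forward between those markers:

(testdat$treat & c(0, diff(testdat$state)) == 1) -
  c(0, diff(testdat$treat & testdat$state) == -1)
# [1]  0  0  0  0  0  0  0  0  0  0 -1  0  0  1  0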

Timings (continuing from @Rui Barradas's setup):

f3 <- function(testdat) {
  cbind(testdat,
        int2 = +ave(testdat$treat & c(0, diff(testdat$state)) == 1,
                    cumsum(c(0, abs(diff(testdat$treat & testdat$state)))),
                    FUN = function(x) rep(x[1], length(x))))
}
f4 <- function(testdat) {
  cbind(testdat,
        int2 = Reduce(function(x, y) if (y == -1) 0 else if (x == 1 || y == 1) 1 else 0,
                      (testdat$treat & c(0, diff(testdat$state)) == 1) -
                        c(0, diff(testdat$treat & testdat$state) == -1),
                      accumulate = TRUE))
}

microbenchmark::microbenchmark(base = f1(df1), dplyr = f2(df1),
 GKi1 = f3(df1), GKi2 = f4(df1), times = 10)
#Unit: milliseconds
#  expr       min        lq     mean    median        uq       max neval  cld
#  base 1132.7269 1188.7439 1233.106 1226.8532 1293.9901 1364.8358    10   c 
# dplyr 1376.0856 1436.4027 1466.418 1458.7240 1509.8990 1559.7976    10    d
#  GKi1  960.5438 1006.8803 1029.105 1022.6114 1065.7427 1074.6027    10  b  
#  GKi2  588.0484  667.2482  694.415  699.0845  739.5523  786.1819    10 a   
GKi