Using dplyr and tidyr, reshape the data from wide to long, then group by venue and count the distinct authors:
library(dplyr)
library(tidyr)
df1 %>%
  gather(key = author, value = name, -venue) %>%   # wide -> long: one row per venue/author slot
  select(venue, name) %>%
  group_by(venue) %>%
  summarise(n = n_distinct(name, na.rm = TRUE))     # distinct author names per venue, ignoring NA
# # A tibble: 3 × 2
# venue n
# <chr> <int>
# 1 A 2
# 2 B 5
# 3 C 2
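gather() still works but is superseded in newer tidyr; the same result can be written with pivot_longer(). A minimal sketch using the same df1, where values_drop_na removes the NA placeholders during the reshape so na.rm is no longer needed:
library(dplyr)
library(tidyr)
df1 %>%
  pivot_longer(-venue, names_to = "author", values_to = "name",
               values_drop_na = TRUE) %>%           # wide -> long, dropping NA names
  group_by(venue) %>%
  summarise(n = n_distinct(name))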
Data:
df1 <- read.table(text = "
venue,author0,author1,author2
A,John,Mary,NA
B,Peter,Jacob,Isabella
C,Lia,NA,NA
B,Jacob,Lara,John
C,Mary,NA,NA
B,Isabella,NA,NA
", header = TRUE, sep = ",", stringsAsFactors = FALSE)
Edit: I saved your Excel sheet as CSV, read it in with read.csv (treating "#N/A" as missing), and the code above then returns the output below:
df1 <- read.csv("Journal_Conferences_Authors.csv", na.strings = "#N/A")
# output
# # A tibble: 427 × 2
# venue n
# <fctr> <int>
# 1 AAAI 4
# 2 ACC 4
# 3 ACIS-ICIS 5
# 4 ACM SIGSOFT Software Engineering Notes 1
# 5 ACM Southeast Regional Conference 5
# 6 ACM TIST 3
# 7 ACM Trans. Comput.-Hum. Interact. 3
# 8 ACML 2
# 9 ADMA 2
# 10 Advanced Visual Interfaces 3
# # ... with 417 more rows
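If you would rather skip the CSV export, readxl can read the workbook directly; a sketch, where the file name, sheet, and .xlsx extension are assumptions about your file:
library(readxl)
df1 <- read_excel("Journal_Conferences_Authors.xlsx", sheet = 1, na = "#N/A")
# df1 is a tibble and can be piped into the same gather()/summarise() chain as above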