As best I can tell, this is a simple KNN analysis. The alternative "no distance matrix" component that the ESRI help describes seems quite undesirable: basically, they are using K-means clustering with a region-growing approach and random seeding. This seems very unstable and could return highly variable results. It appears they perform some maneuvering to avoid issues such as disconnected regions, so it may take some doing to exactly recreate their results. You can approximate the "spatially constrained" option in spdep. Here is a brief example of a distance analysis that will give you a starting point. Keep in mind that in order to assign "classes" you will need to set up some type of looping structure.
# KNN distance example: compute, for each observation, the distance to its
# nearest neighbor within a search band, and map the result.
library(sp)     # library() errors on failure; require() only returns FALSE
library(spdep)

data(meuse)
coordinates(meuse) <- ~x+y

# Neighbor object for points within the specified range. The tiny lower
# bound (0.0001) excludes each point's zero-distance match with itself.
meuse.dist <- dnearneigh(coordinates(meuse), 0.0001, 1000)

# Coerce the neighbor object to a list with the inter-point distances for
# each observation
dist.list <- nbdists(meuse.dist, coordinates(meuse))

# New column with the distance to the nearest neighbor. vapply() is
# type-stable, unlike unlist(lapply(...)). NOTE(review): if any point has
# no neighbor inside the band, min() on an empty vector warns and yields
# Inf -- same as the original code; widen the band if that occurs.
meuse@data$NNDist <- vapply(dist.list, min, numeric(1))

# Plot results on a 10-step blue-yellow-red ramp
spplot(meuse, "NNDist",
       col.regions = colorRampPalette(c("blue", "yellow", "red"),
                                      interpolate = "spline")(10))
You may also want to explore hierarchical clustering. Note, however, that for larger data sets hclust requires a full (triangular) distance matrix, whereas dnearneigh does not. Here is an example using constrained hierarchical clustering.
# SPATIALLY CONSTRAINED CLUSTERING
# Constrained hierarchical clustering of the meuse points on their
# coordinates, cut two ways: by number of classes and by dissimilarity height.
library(sp)     # library() errors on failure; require() only returns FALSE
library(rioja)

data(meuse)
coordinates(meuse) <- ~x+y

# Coordinate data frame keyed by the same row names as the attribute table
cdat <- data.frame(x = coordinates(meuse)[, 1],
                   y = coordinates(meuse)[, 2])
rownames(cdat) <- rownames(meuse@data)

# Constrained clustering on the Euclidean distance matrix of the coordinates;
# method = "conslink" is rioja's constrained single-linkage option
chc <- chclust(dist(cdat), method = "conslink")

# Cut the tree into a fixed number of classes (k = 3)
chc.n3 <- cutree(chc, k = 3)
# Cut the tree at a fixed dissimilarity height (h = 200)
chc.d200 <- cutree(chc, h = 200)

# Attach both classifications to the attribute data
meuse@data <- data.frame(meuse@data, KNN = as.factor(chc.n3), DClust = chc.d200)
# Save the current graphics parameters and restore them afterwards.
# BUG FIX: the original `opar <- par` copied the par() FUNCTION object, and
# `par <- opar` then shadowed par() with a variable -- nothing was ever
# saved or restored. The correct idiom is par(no.readonly = TRUE) / par(opar).
opar <- par(no.readonly = TRUE)
par(mfcol = c(1, 2))

# Map each KNN class to a topo.colors() color. match() against the unique
# class values is the vectorized equivalent of the original index loop and
# assigns identical colors.
knn.classes <- unique(meuse@data$KNN)
cols <- topo.colors(length(knn.classes))
color <- cols[match(meuse@data$KNN, knn.classes)]
plot(meuse, col = color, pch = 19, main = "KNN Clustering")

# Same coloring scheme for the distance-based clusters
d.classes <- unique(meuse@data$DClust)
cols <- topo.colors(length(d.classes))
color <- cols[match(meuse@data$DClust, d.classes)]
plot(meuse, col = color, pch = 19, main = "Distance Clustering")

# Restore the saved graphics parameters
par(opar)