
I have three slices containing 1 million, 2 million and 3 million ints respectively. I want to insert them into Redis concurrently, so that the total execution time is no greater than the time it takes to insert the 3-million-int slice alone. I tried using sync.WaitGroup but it doesn't speed up the execution. Here's my basic code; a sketch of the WaitGroup version I tried follows it.

package main

import (
    "log"
    "strconv"
    "time"

    "gopkg.in/redis.v5"
)

func main() {
    // Build three batches of 1M, 2M and 3M ints.
    oneMillion := makeRange(1, 1000000)
    twoMillion := makeRange(1000001, 3000000)
    threeMillion := makeRange(3000001, 6000000)
    elements := [][]int{oneMillion, twoMillion, threeMillion}
    client := redis.NewClient(&redis.Options{
        Addr:         "localhost:6379",
        Password:     "",
        DB:           0,
        DialTimeout:  60 * time.Second,
        WriteTimeout: 60 * time.Second,
        ReadTimeout:  60 * time.Second,
        PoolTimeout:  60 * time.Second,
    })
    // A single pipeline is reused and each Exec blocks until the whole
    // batch has been written, so the three batches run one after another
    // rather than concurrently.
    pipeline := client.Pipeline()
    for _, elem := range elements {
        for _, n := range elem {
            key := "KEY:" + strconv.Itoa(n)
            val := "VAL:" + strconv.Itoa(n)
            // Queued commands only report errors once Exec runs.
            if cmd := pipeline.Set(key, val, 0); cmd.Err() != nil {
                log.Fatal("cmd error: ", cmd.Err())
            }
        }
        if _, err := pipeline.Exec(); err != nil {
            log.Fatal("error: ", err)
        }
    }
}

// makeRange returns a slice of the ints from min to max, inclusive.
func makeRange(min, max int) []int {
    a := make([]int, max-min+1)
    for i := range a {
        a[i] = min + i
    }
    return a
}
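
And here is roughly the sync.WaitGroup version I tried. It's a sketch rather than my exact code: each batch gets its own goroutine and its own pipeline, it reuses client and elements from main above, and "sync" has to be added to the imports.

    var wg sync.WaitGroup
    for _, batch := range elements {
        wg.Add(1)
        go func(batch []int) {
            defer wg.Done()
            // One pipeline per goroutine so queued commands don't interleave.
            p := client.Pipeline()
            for _, n := range batch {
                p.Set("KEY:"+strconv.Itoa(n), "VAL:"+strconv.Itoa(n), 0)
            }
            if _, err := p.Exec(); err != nil {
                log.Fatal("error: ", err)
            }
        }(batch)
    }
    wg.Wait()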

1 Answer


More or less every operation on Redis is atomic, i.e. the server executes commands on a single thread, so only one command is executing on the Redis server at any given time. So in theory, if you want to do say 3 million SET operations on a Redis server, all those operations will happen serially, and the total time is bounded by the sum of the commands' server-side execution times rather than by the largest batch. Client-side concurrency can overlap network round trips and request serialization, but it cannot make the server execute the SETs in parallel.
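You can check this from the client side by timing each pipeline's Exec and the overall wall clock in a goroutine-per-batch version: the total comes out near the sum of the three batches, not near the largest one. A minimal sketch, assuming the client and elements values from the question ("sync" must be added to the imports; the variable names and log output are illustrative):

    start := time.Now()
    var wg sync.WaitGroup
    for i, batch := range elements {
        wg.Add(1)
        go func(i int, batch []int) {
            defer wg.Done()
            p := client.Pipeline()
            for _, n := range batch {
                p.Set("KEY:"+strconv.Itoa(n), "VAL:"+strconv.Itoa(n), 0)
            }
            t := time.Now()
            if _, err := p.Exec(); err != nil {
                log.Fatal("error: ", err)
            }
            // Round trips overlap across goroutines; server execution does not.
            log.Printf("batch %d (%d keys): %v", i, len(batch), time.Since(t))
        }(i, batch)
    }
    wg.Wait()
    log.Printf("total: %v", time.Since(start))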

gipsy