```go
package main

import (
	"context"
	"flag"
	"fmt"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
)

var (
	workers   int
	endpoints string
	urlsDone  int64
	urlsError int64
)

func main() {
	flag.IntVar(&workers, "workers", 5, "number of workers")
	flag.StringVar(&endpoints, "endpoints", "1,2,3,a,b", "comma-delimited list of endpoints")
	flag.Parse()

	// Pass the WaitGroup as a pointer so every worker shares the same one.
	wg := &sync.WaitGroup{}
	// The context signals all workers to quit once every message has been processed.
	ctx, cancel := context.WithCancel(context.Background())
	tasks := make(chan string)
	defer close(tasks)
	done := make(chan bool)
	defer close(done)

	// Register all workers before starting them so Wait cannot return early.
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go worker(ctx, wg, tasks, done)
	}

	msgs := strings.Split(endpoints, ",")
	go func() {
		for _, endpoint := range msgs {
			tasks <- endpoint
		}
	}()

	// Wait until every message has been processed.
	for i := 0; i < len(msgs); i++ {
		<-done
	}
	// Signal the workers to quit...
	cancel()
	// ...and wait for them to finish gracefully.
	wg.Wait()
	fmt.Printf("work done. successfully=%d errors=%d\n", urlsDone, urlsError)
}

func worker(ctx context.Context, wg *sync.WaitGroup, urls chan string, done chan bool) {
	// The WaitGroup is still necessary: if a worker has to clean up when
	// ctx is cancelled, main must wait until that cleanup has finished
	// for every worker.
	defer wg.Done()
	for {
		select {
		case msg := <-urls:
			// A real program would do network work here, e.g.:
			//   _, err := http.Get(msg)
			// For convenience, simulate it by parsing the message as an integer.
			_, err := strconv.ParseInt(msg, 10, 32)
			if err != nil {
				// Atomically update the shared counters.
				atomic.AddInt64(&urlsError, 1)
			} else {
				atomic.AddInt64(&urlsDone, 1)
			}
			done <- true
		case <-ctx.Done():
			// Quit the worker, cleaning up first if necessary.
			return
		}
	}
}
```
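
With the default flags, three of the five endpoints (`1`, `2`, `3`) parse as integers and two (`a`, `b`) do not, so running the program as-is should print `work done. successfully=3 errors=2`.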
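
The `done` channel and the context buy one specific thing: `main` can wait for per-worker cleanup on shutdown. If no such cleanup is needed, a simpler design works. The sketch below (my alternative, not part of the original) closes the tasks channel and lets each worker drain it with `range`, so the WaitGroup alone covers termination:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
)

func main() {
	var succeeded, failed int64
	tasks := make(chan string)
	wg := &sync.WaitGroup{}
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// range exits once tasks is closed and drained,
			// so no context or done channel is needed.
			for msg := range tasks {
				if _, err := strconv.ParseInt(msg, 10, 32); err != nil {
					atomic.AddInt64(&failed, 1)
				} else {
					atomic.AddInt64(&succeeded, 1)
				}
			}
		}()
	}
	for _, endpoint := range strings.Split("1,2,3,a,b", ",") {
		tasks <- endpoint
	}
	close(tasks) // signal the workers that there is no more work
	wg.Wait()
	fmt.Printf("work done. successfully=%d errors=%d\n", succeeded, failed)
}
```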