Building scalable concurrent systems in Go involves designing applications that efficiently utilize resources, handle a high volume of requests, and remain responsive under load. This section covers the key principles and techniques for achieving scalability in concurrent systems.
Horizontal vs. Vertical Scaling:
Goroutine Lifecycle Management:
Worker Pool Pattern:
// worker consumes job IDs from jobs until the channel is closed,
// simulating one second of work per job and sending the doubled
// job ID on results. Run one goroutine per pool worker.
func worker(id int, jobs <-chan int, results chan<- int) {
	for j := range jobs {
		fmt.Printf("Worker %d processing job %d\n", id, j)
		time.Sleep(time.Second) // Simulate work
		results <- j * 2
	}
}
// main demonstrates the worker pool pattern: three workers share one
// jobs channel and write their output to a shared results channel.
func main() {
	const (
		numWorkers = 3
		numJobs    = 9
	)
	jobs := make(chan int, 100)
	results := make(chan int, 100)

	// Launch the pool.
	for id := 1; id <= numWorkers; id++ {
		go worker(id, jobs, results)
	}

	// Enqueue every job, then close so the workers' range loops end.
	for job := 1; job <= numJobs; job++ {
		jobs <- job
	}
	close(jobs)

	// Drain exactly one result per job before exiting.
	for n := 0; n < numJobs; n++ {
		fmt.Println("Result:", <-results)
	}
}
Channel Buffering:
gofunc main() {
ch := make(chan int, 10)
go func() {
for i := 0; i < 20; i++ {
ch <- i
fmt.Println("Sent:", i)
}
close(ch)
}()
go func() {
for v := range ch {
fmt.Println("Received:", v)
}
}()
time.Sleep(time.Second * 3)
}
Fan-Out and Fan-In:
// fanOut distributes values from ch across n output channels. The n
// goroutines compete to receive from ch, double what they get, and
// forward the product on their own buffered channel, closing it once
// ch is exhausted.
func fanOut(ch chan int, n int) []chan int {
	channels := make([]chan int, n)
	for i := 0; i < n; i++ {
		c := make(chan int, 10)
		channels[i] = c
		go func() {
			defer close(c)
			for v := range ch {
				c <- v * 2
			}
		}()
	}
	return channels
}
// fanIn merges every input channel into a single unbuffered output
// channel. One forwarding goroutine runs per input; a WaitGroup tracks
// them so the merged channel is closed only after all inputs drain.
func fanIn(chs []chan int) chan int {
	merged := make(chan int)
	var pending sync.WaitGroup
	pending.Add(len(chs))
	for _, input := range chs {
		go func(in chan int) {
			defer pending.Done()
			for v := range in {
				merged <- v
			}
		}(input)
	}
	// Closer goroutine: signals completion to the merged channel's readers.
	go func() {
		pending.Wait()
		close(merged)
	}()
	return merged
}
func main() {
ch := make(chan int, 100)
for i := 1; i <= 20; i++ {
ch <- i
}
close(ch)
outs := fanOut(ch, 4)
result := fanIn(outs)
for v := range result {
fmt.Println("Result:", v)
}
}
Limiting Concurrency with Semaphores:
// worker announces that it has started, simulates one second of work,
// and announces completion.
func worker(sem chan struct{}, id int) {
	// Return this worker's slot to the counting semaphore on exit, no
	// matter how the function returns.
	defer func() { <-sem }()

	fmt.Printf("Worker %d started\n", id)
	time.Sleep(time.Second) // simulated workload
	fmt.Printf("Worker %d finished\n", id)
}
// main runs 20 workers while allowing at most five to execute at once,
// using a buffered channel as a counting semaphore.
func main() {
	const maxConcurrent = 5
	sem := make(chan struct{}, maxConcurrent)
	for id := 0; id < 20; id++ {
		sem <- struct{}{} // acquire a slot; blocks while 5 workers run
		go worker(sem, id)
	}
	// Wait for all workers to finish: the semaphore can only be filled
	// to capacity once every worker has released its slot.
	for i := 0; i < cap(sem); i++ {
		sem <- struct{}{}
	}
}
Rate Limiting:
// rateLimiter returns a token-bucket channel permitting at most limit
// operations per interval; callers receive a token before proceeding.
//
// The bucket starts full so the first burst is not stalled for a whole
// interval, and refills use a non-blocking send so a full bucket never
// blocks the refill goroutine across ticks (which would skew the rate).
//
// NOTE(review): the refill goroutine and its ticker run for the life of
// the program; stopping them would need a context or close signal in
// the signature.
func rateLimiter(limit int, interval time.Duration) chan struct{} {
	ch := make(chan struct{}, limit)
	// Begin with a full bucket of tokens.
	for i := 0; i < limit; i++ {
		ch <- struct{}{}
	}
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop() // unreachable today, correct if the loop ever exits
		for range ticker.C {
			for i := 0; i < limit; i++ {
				select {
				case ch <- struct{}{}:
				default: // bucket already full; drop the token
				}
			}
		}
	}()
	return ch
}
// main pushes 20 requests through a limiter that grants five tokens per
// second, then sleeps briefly so in-flight handlers can finish printing.
func main() {
	tokens := rateLimiter(5, time.Second)
	for req := 0; req < 20; req++ {
		<-tokens // block until the limiter grants a token
		go func(id int) {
			fmt.Printf("Handling request %d\n", id)
			time.Sleep(time.Millisecond * 500)
		}(req)
	}
	time.Sleep(time.Second * 5)
}
Context for Cancellation and Timeouts:
Use the context package to manage cancellation and timeouts in goroutines.
package to manage cancellation and timeouts in goroutines.gofunc worker(ctx context.Context, id int, ch chan<- int) {
select {
case <-time.After(time.Second):
fmt.Printf("Worker %d done\n", id)
ch <- id
case <-ctx.Done():
fmt.Printf("Worker %d canceled\n", id)
}
}
// main launches five workers under a three-second timeout and prints
// whichever results arrive before the deadline.
func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
	defer cancel()

	ch := make(chan int)
	for id := 0; id < 5; id++ {
		go worker(ctx, id, ch)
	}

	for received := 0; received < 5; received++ {
		select {
		case result := <-ch:
			fmt.Println("Received:", result)
		case <-ctx.Done():
			fmt.Println("Timeout")
			return
		}
	}
}
Error Handling Patterns:
// worker reports its outcome on errCh: even-numbered workers fail with
// a descriptive error, odd-numbered workers succeed and send nil.
func worker(id int, errCh chan<- error) {
	if id%2 != 0 {
		fmt.Printf("Worker %d completed successfully\n", id)
		errCh <- nil
		return
	}
	errCh <- fmt.Errorf("worker %d encountered an error", id)
}
// main starts five workers and reports any errors they deliver on a
// shared error channel; one message is read back per worker.
func main() {
	const workers = 5
	errCh := make(chan error)
	for id := 0; id < workers; id++ {
		go worker(id, errCh)
	}
	for done := 0; done < workers; done++ {
		if err := <-errCh; err != nil {
			fmt.Println("Error:", err)
		}
	}
}
Profiling and Monitoring: use Go's built-in profiling tools (pprof, trace) to identify bottlenecks and optimize performance. Example of using pprof:
import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof handlers on the default mux
)

// main starts the net/http/pprof debug server on localhost:6060 in a
// background goroutine; profiles are then available under
// http://localhost:6060/debug/pprof/ while the application runs.
// The original snippet used log without importing it.
func main() {
	go func() {
		// ListenAndServe blocks; if it ever returns, log the error.
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
	// Application code here
}
By understanding and applying these principles and techniques, you can build highly scalable concurrent systems in Go that effectively utilize resources, handle high loads, and remain responsive under stress.