package cluster

import (
	"context"
	"errors"
	"sync"
	"sync/atomic"
	"time"

	"git.noahlan.cn/northlan/ngs/internal/env"
	"google.golang.org/grpc"
)

// connPool holds a fixed-size set of gRPC client connections to a single
// address and hands them out in round-robin order.
type connPool struct {
	index uint32
	v     []*grpc.ClientConn
}

// rpcClient maintains one connPool per remote address.
type rpcClient struct {
	sync.RWMutex
	isClosed bool
	pools    map[string]*connPool
}

func newConnArray(maxSize uint, addr string) (*connPool, error) {
	a := &connPool{
		index: 0,
		v:     make([]*grpc.ClientConn, maxSize),
	}
	if err := a.init(addr); err != nil {
		return nil, err
	}
	return a, nil
}

// init dials the address once per slot, each dial bounded by a 2-second timeout.
func (a *connPool) init(addr string) error {
	for i := range a.v {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		conn, err := grpc.DialContext(
			ctx,
			addr,
			env.GrpcOptions...,
		)
		cancel()
		if err != nil {
			// Clean up the connections dialed so far if initialization fails.
			a.Close()
			return err
		}
		a.v[i] = conn
	}
	return nil
}

// Get returns the next connection in round-robin order.
func (a *connPool) Get() *grpc.ClientConn {
	next := atomic.AddUint32(&a.index, 1) % uint32(len(a.v))
	return a.v[next]
}

// Close closes every connection in the pool and clears the slots.
func (a *connPool) Close() {
	for i, c := range a.v {
		if c != nil {
			if err := c.Close(); err != nil {
				// TODO: error handling
			}
			a.v[i] = nil
		}
	}
}

func newRPCClient() *rpcClient {
	return &rpcClient{
		pools: make(map[string]*connPool),
	}
}

// getConnPool returns the pool for addr, creating it on first use.
func (c *rpcClient) getConnPool(addr string) (*connPool, error) {
	c.RLock()
	if c.isClosed {
		c.RUnlock()
		return nil, errors.New("rpc client is closed")
	}
	array, ok := c.pools[addr]
	c.RUnlock()
	if !ok {
		var err error
		array, err = c.createConnPool(addr)
		if err != nil {
			return nil, err
		}
	}
	return array, nil
}

// createConnPool creates the pool for addr under the write lock, re-checking
// the map so concurrent callers end up sharing a single pool.
func (c *rpcClient) createConnPool(addr string) (*connPool, error) {
	c.Lock()
	defer c.Unlock()
	array, ok := c.pools[addr]
	if !ok {
		var err error
		// TODO: make conn count configurable
		array, err = newConnArray(10, addr)
		if err != nil {
			return nil, err
		}
		c.pools[addr] = array
	}
	return array, nil
}

// closePool closes every pool and marks the client closed so later
// getConnPool calls fail fast.
func (c *rpcClient) closePool() {
	c.Lock()
	if !c.isClosed {
		c.isClosed = true
		// close all connections
		for _, array := range c.pools {
			array.Close()
		}
	}
	c.Unlock()
}