feat: 基本完成TCP/WS端,tcp包协议,数据包协议

main
NorthLan 3 years ago
parent 9d7de95373
commit cc68630e25

@ -0,0 +1,13 @@
Copyright [2022-2025] [NorthLan]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -0,0 +1 @@
protoc --go_opt=paths=source_relative --go_out=. --proto_path=. *.proto

@ -0,0 +1 @@
./protoc --go_out=. *.proto

@ -0,0 +1,203 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.19.4
// source: test.proto
package testdata
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Ping struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Content string `protobuf:"bytes,1,opt,name=Content,proto3" json:"Content,omitempty"`
}
func (x *Ping) Reset() {
*x = Ping{}
if protoimpl.UnsafeEnabled {
mi := &file_test_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Ping) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Ping) ProtoMessage() {}
func (x *Ping) ProtoReflect() protoreflect.Message {
mi := &file_test_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Ping.ProtoReflect.Descriptor instead.
func (*Ping) Descriptor() ([]byte, []int) {
return file_test_proto_rawDescGZIP(), []int{0}
}
func (x *Ping) GetContent() string {
if x != nil {
return x.Content
}
return ""
}
type Pong struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Content string `protobuf:"bytes,2,opt,name=Content,proto3" json:"Content,omitempty"`
}
func (x *Pong) Reset() {
*x = Pong{}
if protoimpl.UnsafeEnabled {
mi := &file_test_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Pong) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Pong) ProtoMessage() {}
func (x *Pong) ProtoReflect() protoreflect.Message {
mi := &file_test_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Pong.ProtoReflect.Descriptor instead.
func (*Pong) Descriptor() ([]byte, []int) {
return file_test_proto_rawDescGZIP(), []int{1}
}
func (x *Pong) GetContent() string {
if x != nil {
return x.Content
}
return ""
}
var File_test_proto protoreflect.FileDescriptor
var file_test_proto_rawDesc = []byte{
0x0a, 0x0a, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x74, 0x65,
0x73, 0x74, 0x64, 0x61, 0x74, 0x61, 0x22, 0x20, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x18,
0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x07, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x04, 0x50, 0x6f, 0x6e, 0x67,
0x12, 0x18, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x42, 0x0b, 0x5a, 0x09, 0x2f, 0x74,
0x65, 0x73, 0x74, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_test_proto_rawDescOnce sync.Once
file_test_proto_rawDescData = file_test_proto_rawDesc
)
func file_test_proto_rawDescGZIP() []byte {
file_test_proto_rawDescOnce.Do(func() {
file_test_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_proto_rawDescData)
})
return file_test_proto_rawDescData
}
var file_test_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_test_proto_goTypes = []interface{}{
(*Ping)(nil), // 0: testdata.Ping
(*Pong)(nil), // 1: testdata.Pong
}
var file_test_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_test_proto_init() }
func file_test_proto_init() {
if File_test_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_test_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Ping); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_test_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Pong); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_test_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_test_proto_goTypes,
DependencyIndexes: file_test_proto_depIdxs,
MessageInfos: file_test_proto_msgTypes,
}.Build()
File_test_proto = out.File
file_test_proto_rawDesc = nil
file_test_proto_goTypes = nil
file_test_proto_depIdxs = nil
}

@ -0,0 +1,13 @@
// Test-only schema: a simple ping/pong message pair used by the transport tests.
syntax = "proto3";
package testdata;
option go_package = "/testdata";
// Ping is the request message; Content carries an arbitrary test payload.
message Ping {
string Content = 1;
}
// Pong is the reply message mirrored back to the sender.
message Pong {
// NOTE(review): field number 2 while Ping uses 1. Field numbers are scoped
// per-message so this is valid, but 1 would be conventional. Do NOT renumber
// without regenerating test.pb.go — changing it is wire-incompatible.
string Content = 2;
}

@ -0,0 +1,92 @@
package cluster
import (
"context"
"net"
"ngs/cluster/clusterpb"
"ngs/internal/message"
"ngs/mock"
"ngs/session"
)
// acceptor is the session.NetworkEntity implementation used for a session
// whose raw connection lives on a remote gate node: every outgoing
// interaction is forwarded to that gate over gRPC.
type acceptor struct {
	sid        int64                  // remote session id, attached to every forwarded request
	gateClient clusterpb.MemberClient // gRPC client of the gate that owns the raw connection
	session    *session.Session       // local session facade bound to this acceptor
	lastMid    uint64                 // last message id, exposed via LastMid and used by Response
	rpcHandler rpcHandler             // local dispatch hook invoked by RPC for Notify messages
	gateAddr   string                 // address of the owning gate (not referenced in this file)
}
// Push implements the session.NetworkEntity interface.
// The payload is serialized locally and forwarded to the owning gate,
// which performs the actual push to the client.
func (a *acceptor) Push(route string, v interface{}) error {
	// TODO: buffer
	payload, err := message.Serialize(v)
	if err != nil {
		return err
	}
	_, err = a.gateClient.HandlePush(context.Background(), &clusterpb.PushMessage{
		SessionId: a.sid,
		Route:     route,
		Data:      payload,
	})
	return err
}
// RPC implements the session.NetworkEntity interface.
// Unlike Push, the serialized Notify message is dispatched locally through
// the rpc handler rather than being sent back to the gate.
func (a *acceptor) RPC(route string, v interface{}) error {
	// TODO: buffer
	payload, err := message.Serialize(v)
	if err != nil {
		return err
	}
	a.rpcHandler(a.session, &message.Message{
		Type:  message.Notify,
		Route: route,
		Data:  payload,
	}, true)
	return nil
}
// LastMid implements the session.NetworkEntity interface.
// It returns the id of the last message recorded for this session, which
// Response uses as the implicit response id.
func (a *acceptor) LastMid() uint64 {
	return a.lastMid
}
// Response implements the session.NetworkEntity interface by answering the
// most recent message, identified by lastMid.
func (a *acceptor) Response(v interface{}) error {
	return a.ResponseMid(a.lastMid, v)
}
// ResponseMid implements the session.NetworkEntity interface.
// It serializes the payload and forwards the response (tagged with the
// given message id) to the gate that owns the connection.
func (a *acceptor) ResponseMid(mid uint64, v interface{}) error {
	// TODO: buffer
	payload, err := message.Serialize(v)
	if err != nil {
		return err
	}
	_, err = a.gateClient.HandleResponse(context.Background(), &clusterpb.ResponseMessage{
		SessionId: a.sid,
		Id:        mid,
		Data:      payload,
	})
	return err
}
// Close implements the session.NetworkEntity interface.
// It asks the owning gate to close the remote session; no local state is
// torn down here.
func (a *acceptor) Close() error {
	// TODO: buffer
	_, err := a.gateClient.CloseSession(context.Background(), &clusterpb.CloseSessionRequest{
		SessionId: a.sid,
	})
	return err
}
// RemoteAddr implements the session.NetworkEntity interface.
// The real connection lives on a remote gate, so only a mock address is
// available on this node.
func (*acceptor) RemoteAddr() net.Addr {
	return mock.NetAddr{}
}

@ -0,0 +1,299 @@
package cluster
import (
"errors"
"fmt"
"net"
"ngs/internal/codec"
"ngs/internal/env"
"ngs/internal/log"
"ngs/internal/message"
"ngs/internal/packet"
"ngs/pipeline"
"ngs/scheduler"
"ngs/session"
"reflect"
"sync/atomic"
"time"
)
const (
	// agentWriteBacklog is the capacity of the per-agent send queue (chSend)
	// and of the internal write buffer used by agent.write.
	agentWriteBacklog = 16
)

var (
	// ErrBrokenPipe represents the low-level connection has broken.
	ErrBrokenPipe = errors.New("broken low-level pipe")
	// ErrBufferExceed indicates that the current session buffer is full and
	// can not receive more data.
	ErrBufferExceed = errors.New("session send buffer exceed")
)

type (
	// Agent corresponding a user, used for store raw conn information.
	agent struct {
		// regular agent member
		session    *session.Session    // session bound to this agent
		conn       net.Conn            // low-level conn fd
		lastMid    uint64              // last message id
		state      int32               // current agent state, read/written via atomic in status()/setStatus()
		chDie      chan struct{}       // wait for close
		chSend     chan pendingMessage // push message queue, drained by the write goroutine
		lastAt     int64               // last heartbeat unix time stamp, accessed atomically
		decoder    *codec.Decoder      // binary decoder
		pipeline   pipeline.Pipeline   // optional outbound processing chain (may be nil)
		rpcHandler rpcHandler          // local dispatch hook for Notify messages
		srv        reflect.Value       // cached session reflect.Value
	}

	// pendingMessage is an outgoing message queued on chSend; serialization
	// and packet encoding happen later in the write goroutine, not in the caller.
	pendingMessage struct {
		typ     message.Type // message type
		route   string       // message route(push)
		mid     uint64       // response message id(response)
		payload interface{}  // payload
	}
)
// newAgent creates an agent for the given connection, wires a fresh session
// to it, and caches the session's reflect.Value for later handler dispatch.
func newAgent(conn net.Conn, pipeline pipeline.Pipeline, rpcHandler rpcHandler) *agent {
	a := &agent{
		chDie:      make(chan struct{}),
		chSend:     make(chan pendingMessage, agentWriteBacklog),
		conn:       conn,
		decoder:    codec.NewDecoder(),
		lastAt:     time.Now().Unix(),
		pipeline:   pipeline,
		rpcHandler: rpcHandler,
		state:      statusStart,
	}
	// binding session
	a.session = session.New(a)
	a.srv = reflect.ValueOf(a.session)
	return a
}
// send enqueues m onto the agent's write queue. The queue is closed by the
// write goroutine on shutdown, so the send below may panic; that panic is
// translated into ErrBrokenPipe instead of crashing the caller.
func (a *agent) send(m pendingMessage) (err error) {
	defer func() {
		if recover() != nil {
			err = ErrBrokenPipe
		}
	}()
	a.chSend <- m
	return err
}
// LastMid implements the session.NetworkEntity interface.
// It returns the id of the last message recorded for this session, used by
// Response as the implicit response id.
func (a *agent) LastMid() uint64 {
	return a.lastMid
}
// Push implementation for session.NetworkEntity interface.
// The payload is queued for the write goroutine; it fails fast when the
// agent is closed or the send queue is already full.
func (a *agent) Push(route string, v interface{}) error {
	switch {
	case a.status() == statusClosed:
		return ErrBrokenPipe
	case len(a.chSend) >= agentWriteBacklog:
		return ErrBufferExceed
	}
	if env.Debug {
		if d, ok := v.([]byte); ok {
			log.Println(fmt.Sprintf("Type=Push, ID=%d, UID=%d, Route=%s, Data=%dbytes",
				a.session.ID(), a.session.UID(), route, len(d)))
		} else {
			log.Println(fmt.Sprintf("Type=Push, ID=%d, UID=%d, Route=%s, Data=%+v",
				a.session.ID(), a.session.UID(), route, v))
		}
	}
	return a.send(pendingMessage{typ: message.Push, route: route, payload: v})
}
// RPC implementation for session.NetworkEntity interface.
// The serialized Notify message is dispatched locally through the rpc
// handler; nothing is written to the client connection.
func (a *agent) RPC(route string, v interface{}) error {
	if a.status() == statusClosed {
		return ErrBrokenPipe
	}
	// TODO: buffer
	payload, err := message.Serialize(v)
	if err != nil {
		return err
	}
	a.rpcHandler(a.session, &message.Message{
		Type:  message.Notify,
		Route: route,
		Data:  payload,
	}, true)
	return nil
}
// Response implementation for session.NetworkEntity interface.
// Response message to session: answers the most recent message, identified
// by lastMid.
func (a *agent) Response(v interface{}) error {
	return a.ResponseMid(a.lastMid, v)
}
// ResponseMid implementation for session.NetworkEntity interface.
// Response message to session.
//
// mid is the id of the message being answered; it must be non-zero because
// Notify messages (mid == 0) cannot be responded to.
func (a *agent) ResponseMid(mid uint64, v interface{}) error {
	if a.status() == statusClosed {
		return ErrBrokenPipe
	}
	// mid is unsigned, so the original "mid <= 0" could only ever mean
	// "mid == 0" (staticcheck SA4003); say so directly.
	if mid == 0 {
		return ErrSessionOnNotify
	}
	if len(a.chSend) >= agentWriteBacklog {
		return ErrBufferExceed
	}
	if env.Debug {
		switch d := v.(type) {
		case []byte:
			log.Println(fmt.Sprintf("Type=Response, ID=%d, UID=%d, MID=%d, Data=%dbytes",
				a.session.ID(), a.session.UID(), mid, len(d)))
		default:
			log.Println(fmt.Sprintf("Type=Response, ID=%d, UID=%d, MID=%d, Data=%+v",
				a.session.ID(), a.session.UID(), mid, v))
		}
	}
	return a.send(pendingMessage{typ: message.Response, mid: mid, payload: v})
}
// Close implementation for session.NetworkEntity interface
// Close closes the agent, clean inner state and close low-level connection.
// Any blocked Read or Write operations will be unblocked and return errors.
//
// NOTE(review): status()/setStatus() below is a non-atomic check-then-act;
// two concurrent Close calls could both pass the guard. The select keeps
// chDie safe from double-close, but conn.Close() would then run twice —
// confirm that the net.Conn implementation tolerates this.
func (a *agent) Close() error {
	if a.status() == statusClosed {
		return ErrCloseClosedSession
	}
	a.setStatus(statusClosed)
	if env.Debug {
		log.Println(fmt.Sprintf("Session closed, ID=%d, UID=%d, IP=%s",
			a.session.ID(), a.session.UID(), a.conn.RemoteAddr()))
	}
	// prevent closing closed channel
	select {
	case <-a.chDie:
		// expect
	default:
		close(a.chDie)
		// Fire session lifetime callbacks on the scheduler goroutine, not here.
		scheduler.PushTask(func() { session.Lifetime.Close(a.session) })
	}
	return a.conn.Close()
}
// RemoteAddr implementation for session.NetworkEntity interface
// returns the remote network address of the underlying connection.
func (a *agent) RemoteAddr() net.Addr {
	return a.conn.RemoteAddr()
}
// String, implementation for Stringer interface.
// lastAt is read atomically because the write goroutine updates it concurrently.
func (a *agent) String() string {
	return fmt.Sprintf("Remote=%s, LastTime=%d", a.conn.RemoteAddr().String(), atomic.LoadInt64(&a.lastAt))
}
// status atomically reads the agent's current state (statusStart/.../statusClosed).
func (a *agent) status() int32 {
	return atomic.LoadInt32(&a.state)
}

// setStatus atomically stores the agent's state.
func (a *agent) setStatus(state int32) {
	atomic.StoreInt32(&a.state, state)
}
// write is the agent's single writer goroutine: it serializes and
// packet-encodes queued messages, emits heartbeats, and owns every write to
// the underlying connection. It exits on heartbeat timeout, a write error,
// agent close (chDie), or application shutdown (env.Die); the deferred
// cleanup then closes chSend (making later agent.send calls fail with
// ErrBrokenPipe via recover) and closes the connection.
func (a *agent) write() {
	ticker := time.NewTicker(env.Heartbeat)
	chWrite := make(chan []byte, agentWriteBacklog)
	// clean func
	defer func() {
		ticker.Stop()
		close(a.chSend)
		close(chWrite)
		_ = a.Close()
		if env.Debug {
			log.Println(fmt.Sprintf("Session write goroutine exit, SessionID=%d, UID=%d", a.session.ID(), a.session.UID()))
		}
	}()
	for {
		select {
		case <-ticker.C:
			// Drop the session when no heartbeat has been recorded for two
			// heartbeat periods.
			deadline := time.Now().Add(-2 * env.Heartbeat).Unix()
			if atomic.LoadInt64(&a.lastAt) < deadline {
				log.Println(fmt.Sprintf("Session heartbeat timeout, LastTime=%d, Deadline=%d", atomic.LoadInt64(&a.lastAt), deadline))
				return
			}
			// NOTE(review): this send blocks if chWrite is full, and this
			// goroutine is chWrite's only consumer — a full buffer here would
			// deadlock the writer permanently. Consider a non-blocking
			// (select/default) send for heartbeat frames.
			chWrite <- hbd
		case data := <-chWrite:
			// close agent while low-level conn broken
			if _, err := a.conn.Write(data); err != nil {
				log.Println(err.Error())
				return
			}
		case data := <-a.chSend:
			payload, err := message.Serialize(data.payload)
			if err != nil {
				// Serialization failures drop the message but keep the agent alive.
				switch data.typ {
				case message.Push:
					log.Println(fmt.Sprintf("Push: %s error: %s", data.route, err.Error()))
				case message.Response:
					log.Println(fmt.Sprintf("Response message(id: %d) error: %s", data.mid, err.Error()))
				default:
					// expect
				}
				break
			}
			// construct message and encode
			m := &message.Message{
				Type:  data.typ,
				Data:  payload,
				Route: data.route,
				ID:    data.mid,
			}
			if pipe := a.pipeline; pipe != nil {
				err := pipe.Outbound().Process(a.session, m)
				if err != nil {
					log.Println("broken pipeline", err.Error())
					break
				}
			}
			em, err := m.Encode()
			if err != nil {
				log.Println(err.Error())
				break
			}
			// packet encode
			p, err := codec.Encode(packet.Data, em)
			if err != nil {
				log.Println(err)
				break
			}
			chWrite <- p
		case <-a.chDie: // agent closed signal
			return
		case <-env.Die: // application quit
			return
		}
	}
}

@ -0,0 +1,172 @@
package cluster
import (
"context"
"fmt"
"ngs/cluster/clusterpb"
"ngs/internal/log"
"sync"
)
// cluster represents a ngs cluster, which contains a bunch of ngs nodes
// and each of them provide a group of different services. All services requests
// from client will send to gate firstly and be forwarded to appropriate node.
type cluster struct {
	// If cluster is not large enough, use slice is OK
	currentNode *Node      // the node this process runs as
	rpcClient   *rpcClient // connection pools to other members, injected via setRpcClient
	mu          sync.RWMutex
	members     []*Member // known members; guarded by mu
}
// newCluster creates a cluster view rooted at the given node; the rpc
// client is injected later via setRpcClient.
func newCluster(currentNode *Node) *cluster {
	return &cluster{currentNode: currentNode}
}
// Register implements the MasterServer gRPC service.
//
// It rejects duplicate service addresses, notifies every existing
// non-master member about the newcomer, registers the newcomer's services
// on the current node, and finally records it in the member list.
func (c *cluster) Register(_ context.Context, req *clusterpb.RegisterRequest) (*clusterpb.RegisterResponse, error) {
	if req.MemberInfo == nil {
		return nil, ErrInvalidRegisterReq
	}
	// Hold the lock across the whole check-notify-append sequence: the
	// previous code read c.members without synchronization, so two
	// concurrent Register calls for the same address could both pass the
	// duplicate check. Serializing registrations (even across the NewMember
	// RPCs below) is acceptable since registration is rare.
	c.mu.Lock()
	defer c.mu.Unlock()
	resp := &clusterpb.RegisterResponse{}
	for _, m := range c.members {
		if m.memberInfo.ServiceAddr == req.MemberInfo.ServiceAddr {
			return nil, fmt.Errorf("address %s has registered", req.MemberInfo.ServiceAddr)
		}
	}
	// Notify registered node to update remote services
	newMember := &clusterpb.NewMemberRequest{MemberInfo: req.MemberInfo}
	for _, m := range c.members {
		resp.Members = append(resp.Members, m.memberInfo)
		if m.isMaster {
			// The master is this process; it is updated directly below.
			continue
		}
		pool, err := c.rpcClient.getConnPool(m.memberInfo.ServiceAddr)
		if err != nil {
			return nil, err
		}
		client := clusterpb.NewMemberClient(pool.Get())
		if _, err = client.NewMember(context.Background(), newMember); err != nil {
			return nil, err
		}
	}
	log.Println("New peer register to cluster", req.MemberInfo.ServiceAddr)
	// Register services to current node
	c.currentNode.handler.addRemoteService(req.MemberInfo)
	c.members = append(c.members, &Member{isMaster: false, memberInfo: req.MemberInfo})
	return resp, nil
}
// Unregister implements the MasterServer gRPC service.
//
// It notifies all other members that the address left, removes that
// address's services from the current node, and drops it from the member
// list. The address must have been registered before.
func (c *cluster) Unregister(_ context.Context, req *clusterpb.UnregisterRequest) (*clusterpb.UnregisterResponse, error) {
	if req.ServiceAddr == "" {
		return nil, ErrInvalidRegisterReq
	}
	// Hold the lock for the whole lookup-notify-delete sequence: the
	// previous code read c.members without synchronization, racing with
	// Register/addMember/delMember.
	c.mu.Lock()
	defer c.mu.Unlock()
	resp := &clusterpb.UnregisterResponse{}
	index := -1
	for i, m := range c.members {
		if m.memberInfo.ServiceAddr == req.ServiceAddr {
			index = i
			break
		}
	}
	if index < 0 {
		// fixed typo: was "has notregistered"
		return nil, fmt.Errorf("address %s has not registered", req.ServiceAddr)
	}
	// Notify registered node to update remote services
	delMember := &clusterpb.DelMemberRequest{ServiceAddr: req.ServiceAddr}
	for _, m := range c.members {
		if m.MemberInfo().ServiceAddr == c.currentNode.ServiceAddr {
			continue
		}
		pool, err := c.rpcClient.getConnPool(m.memberInfo.ServiceAddr)
		if err != nil {
			return nil, err
		}
		client := clusterpb.NewMemberClient(pool.Get())
		if _, err = client.DelMember(context.Background(), delMember); err != nil {
			return nil, err
		}
	}
	log.Println("Exists peer unregister to cluster", req.ServiceAddr)
	// Remove the leaving member's services from the current node.
	c.currentNode.handler.delMember(req.ServiceAddr)
	// append with an empty tail also covers index == len-1, so the previous
	// last-element special case was redundant.
	c.members = append(c.members[:index], c.members[index+1:]...)
	return resp, nil
}
// setRpcClient injects the rpc client used to reach other members.
// NOTE(review): written without holding mu — presumably only called once
// during startup before any RPC traffic; confirm against callers.
func (c *cluster) setRpcClient(client *rpcClient) {
	c.rpcClient = client
}
// remoteAddrs returns the service addresses of every known member.
func (c *cluster) remoteAddrs() []string {
	c.mu.RLock()
	defer c.mu.RUnlock()
	var addrs []string
	for _, m := range c.members {
		addrs = append(addrs, m.memberInfo.ServiceAddr)
	}
	return addrs
}
// initMembers records the member list reported by the master at startup.
func (c *cluster) initMembers(members []*clusterpb.MemberInfo) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, info := range members {
		c.members = append(c.members, &Member{memberInfo: info})
	}
}
// addMember updates the stored info for an already-known member, or appends
// a new entry when the service address has not been seen before.
func (c *cluster) addMember(info *clusterpb.MemberInfo) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, member := range c.members {
		if member.memberInfo.ServiceAddr == info.ServiceAddr {
			member.memberInfo = info
			return
		}
	}
	c.members = append(c.members, &Member{memberInfo: info})
}
// delMember removes the member with the given service address, if present.
func (c *cluster) delMember(addr string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for i, member := range c.members {
		if member.memberInfo.ServiceAddr == addr {
			c.members = append(c.members[:i], c.members[i+1:]...)
			return
		}
	}
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,475 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.19.4
// source: cluster.proto
package clusterpb
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// MasterClient is the client API for Master service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type MasterClient interface {
Register(ctx context.Context, in *RegisterRequest, opts ...grpc.CallOption) (*RegisterResponse, error)
Unregister(ctx context.Context, in *UnregisterRequest, opts ...grpc.CallOption) (*UnregisterResponse, error)
}
type masterClient struct {
cc grpc.ClientConnInterface
}
func NewMasterClient(cc grpc.ClientConnInterface) MasterClient {
return &masterClient{cc}
}
func (c *masterClient) Register(ctx context.Context, in *RegisterRequest, opts ...grpc.CallOption) (*RegisterResponse, error) {
out := new(RegisterResponse)
err := c.cc.Invoke(ctx, "/clusterpb.Master/Register", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *masterClient) Unregister(ctx context.Context, in *UnregisterRequest, opts ...grpc.CallOption) (*UnregisterResponse, error) {
out := new(UnregisterResponse)
err := c.cc.Invoke(ctx, "/clusterpb.Master/Unregister", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// MasterServer is the server API for Master service.
// All implementations should embed UnimplementedMasterServer
// for forward compatibility
type MasterServer interface {
Register(context.Context, *RegisterRequest) (*RegisterResponse, error)
Unregister(context.Context, *UnregisterRequest) (*UnregisterResponse, error)
}
// UnimplementedMasterServer should be embedded to have forward compatible implementations.
type UnimplementedMasterServer struct {
}
func (UnimplementedMasterServer) Register(context.Context, *RegisterRequest) (*RegisterResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Register not implemented")
}
func (UnimplementedMasterServer) Unregister(context.Context, *UnregisterRequest) (*UnregisterResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Unregister not implemented")
}
// UnsafeMasterServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to MasterServer will
// result in compilation errors.
type UnsafeMasterServer interface {
mustEmbedUnimplementedMasterServer()
}
func RegisterMasterServer(s grpc.ServiceRegistrar, srv MasterServer) {
s.RegisterService(&Master_ServiceDesc, srv)
}
func _Master_Register_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RegisterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MasterServer).Register(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/clusterpb.Master/Register",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MasterServer).Register(ctx, req.(*RegisterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Master_Unregister_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UnregisterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MasterServer).Unregister(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/clusterpb.Master/Unregister",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MasterServer).Unregister(ctx, req.(*UnregisterRequest))
}
return interceptor(ctx, in, info, handler)
}
// Master_ServiceDesc is the grpc.ServiceDesc for Master service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Master_ServiceDesc = grpc.ServiceDesc{
ServiceName: "clusterpb.Master",
HandlerType: (*MasterServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Register",
Handler: _Master_Register_Handler,
},
{
MethodName: "Unregister",
Handler: _Master_Unregister_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "cluster.proto",
}
// MemberClient is the client API for Member service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type MemberClient interface {
HandleRequest(ctx context.Context, in *RequestMessage, opts ...grpc.CallOption) (*MemberHandleResponse, error)
HandleNotify(ctx context.Context, in *NotifyMessage, opts ...grpc.CallOption) (*MemberHandleResponse, error)
HandlePush(ctx context.Context, in *PushMessage, opts ...grpc.CallOption) (*MemberHandleResponse, error)
HandleResponse(ctx context.Context, in *ResponseMessage, opts ...grpc.CallOption) (*MemberHandleResponse, error)
NewMember(ctx context.Context, in *NewMemberRequest, opts ...grpc.CallOption) (*NewMemberResponse, error)
DelMember(ctx context.Context, in *DelMemberRequest, opts ...grpc.CallOption) (*DelMemberResponse, error)
SessionClosed(ctx context.Context, in *SessionClosedRequest, opts ...grpc.CallOption) (*SessionClosedResponse, error)
CloseSession(ctx context.Context, in *CloseSessionRequest, opts ...grpc.CallOption) (*CloseSessionResponse, error)
}
type memberClient struct {
cc grpc.ClientConnInterface
}
func NewMemberClient(cc grpc.ClientConnInterface) MemberClient {
return &memberClient{cc}
}
func (c *memberClient) HandleRequest(ctx context.Context, in *RequestMessage, opts ...grpc.CallOption) (*MemberHandleResponse, error) {
out := new(MemberHandleResponse)
err := c.cc.Invoke(ctx, "/clusterpb.Member/HandleRequest", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *memberClient) HandleNotify(ctx context.Context, in *NotifyMessage, opts ...grpc.CallOption) (*MemberHandleResponse, error) {
out := new(MemberHandleResponse)
err := c.cc.Invoke(ctx, "/clusterpb.Member/HandleNotify", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *memberClient) HandlePush(ctx context.Context, in *PushMessage, opts ...grpc.CallOption) (*MemberHandleResponse, error) {
out := new(MemberHandleResponse)
err := c.cc.Invoke(ctx, "/clusterpb.Member/HandlePush", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *memberClient) HandleResponse(ctx context.Context, in *ResponseMessage, opts ...grpc.CallOption) (*MemberHandleResponse, error) {
out := new(MemberHandleResponse)
err := c.cc.Invoke(ctx, "/clusterpb.Member/HandleResponse", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *memberClient) NewMember(ctx context.Context, in *NewMemberRequest, opts ...grpc.CallOption) (*NewMemberResponse, error) {
out := new(NewMemberResponse)
err := c.cc.Invoke(ctx, "/clusterpb.Member/NewMember", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *memberClient) DelMember(ctx context.Context, in *DelMemberRequest, opts ...grpc.CallOption) (*DelMemberResponse, error) {
out := new(DelMemberResponse)
err := c.cc.Invoke(ctx, "/clusterpb.Member/DelMember", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *memberClient) SessionClosed(ctx context.Context, in *SessionClosedRequest, opts ...grpc.CallOption) (*SessionClosedResponse, error) {
out := new(SessionClosedResponse)
err := c.cc.Invoke(ctx, "/clusterpb.Member/SessionClosed", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *memberClient) CloseSession(ctx context.Context, in *CloseSessionRequest, opts ...grpc.CallOption) (*CloseSessionResponse, error) {
out := new(CloseSessionResponse)
err := c.cc.Invoke(ctx, "/clusterpb.Member/CloseSession", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// MemberServer is the server API for Member service.
// All implementations should embed UnimplementedMemberServer
// for forward compatibility
type MemberServer interface {
HandleRequest(context.Context, *RequestMessage) (*MemberHandleResponse, error)
HandleNotify(context.Context, *NotifyMessage) (*MemberHandleResponse, error)
HandlePush(context.Context, *PushMessage) (*MemberHandleResponse, error)
HandleResponse(context.Context, *ResponseMessage) (*MemberHandleResponse, error)
NewMember(context.Context, *NewMemberRequest) (*NewMemberResponse, error)
DelMember(context.Context, *DelMemberRequest) (*DelMemberResponse, error)
SessionClosed(context.Context, *SessionClosedRequest) (*SessionClosedResponse, error)
CloseSession(context.Context, *CloseSessionRequest) (*CloseSessionResponse, error)
}
// UnimplementedMemberServer should be embedded to have forward compatible implementations.
type UnimplementedMemberServer struct {
}
func (UnimplementedMemberServer) HandleRequest(context.Context, *RequestMessage) (*MemberHandleResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method HandleRequest not implemented")
}
func (UnimplementedMemberServer) HandleNotify(context.Context, *NotifyMessage) (*MemberHandleResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method HandleNotify not implemented")
}
func (UnimplementedMemberServer) HandlePush(context.Context, *PushMessage) (*MemberHandleResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method HandlePush not implemented")
}
func (UnimplementedMemberServer) HandleResponse(context.Context, *ResponseMessage) (*MemberHandleResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method HandleResponse not implemented")
}
func (UnimplementedMemberServer) NewMember(context.Context, *NewMemberRequest) (*NewMemberResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method NewMember not implemented")
}
func (UnimplementedMemberServer) DelMember(context.Context, *DelMemberRequest) (*DelMemberResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DelMember not implemented")
}
func (UnimplementedMemberServer) SessionClosed(context.Context, *SessionClosedRequest) (*SessionClosedResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SessionClosed not implemented")
}
func (UnimplementedMemberServer) CloseSession(context.Context, *CloseSessionRequest) (*CloseSessionResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CloseSession not implemented")
}
// UnsafeMemberServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to MemberServer will
// result in compilation errors.
type UnsafeMemberServer interface {
mustEmbedUnimplementedMemberServer()
}
func RegisterMemberServer(s grpc.ServiceRegistrar, srv MemberServer) {
s.RegisterService(&Member_ServiceDesc, srv)
}
func _Member_HandleRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RequestMessage)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MemberServer).HandleRequest(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/clusterpb.Member/HandleRequest",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MemberServer).HandleRequest(ctx, req.(*RequestMessage))
}
return interceptor(ctx, in, info, handler)
}
func _Member_HandleNotify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(NotifyMessage)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MemberServer).HandleNotify(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/clusterpb.Member/HandleNotify",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MemberServer).HandleNotify(ctx, req.(*NotifyMessage))
}
return interceptor(ctx, in, info, handler)
}
func _Member_HandlePush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PushMessage)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MemberServer).HandlePush(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/clusterpb.Member/HandlePush",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MemberServer).HandlePush(ctx, req.(*PushMessage))
}
return interceptor(ctx, in, info, handler)
}
// _Member_HandleResponse_Handler is the generated dispatch shim for
// Member.HandleResponse.
func _Member_HandleResponse_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ResponseMessage)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(MemberServer).HandleResponse(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/clusterpb.Member/HandleResponse",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(MemberServer).HandleResponse(ctx, req.(*ResponseMessage))
	}
	return interceptor(ctx, in, info, handler)
}
// _Member_NewMember_Handler is the generated dispatch shim for
// Member.NewMember.
func _Member_NewMember_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(NewMemberRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(MemberServer).NewMember(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/clusterpb.Member/NewMember",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(MemberServer).NewMember(ctx, req.(*NewMemberRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// _Member_DelMember_Handler is the generated dispatch shim for
// Member.DelMember.
func _Member_DelMember_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DelMemberRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(MemberServer).DelMember(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/clusterpb.Member/DelMember",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(MemberServer).DelMember(ctx, req.(*DelMemberRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// _Member_SessionClosed_Handler is the generated dispatch shim for
// Member.SessionClosed.
func _Member_SessionClosed_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(SessionClosedRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(MemberServer).SessionClosed(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/clusterpb.Member/SessionClosed",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(MemberServer).SessionClosed(ctx, req.(*SessionClosedRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// _Member_CloseSession_Handler is the generated dispatch shim for
// Member.CloseSession.
func _Member_CloseSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CloseSessionRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(MemberServer).CloseSession(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/clusterpb.Member/CloseSession",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(MemberServer).CloseSession(ctx, req.(*CloseSessionRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// Member_ServiceDesc is the grpc.ServiceDesc for Member service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Member_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "clusterpb.Member",
	HandlerType: (*MemberServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "HandleRequest",
			Handler:    _Member_HandleRequest_Handler,
		},
		{
			MethodName: "HandleNotify",
			Handler:    _Member_HandleNotify_Handler,
		},
		{
			MethodName: "HandlePush",
			Handler:    _Member_HandlePush_Handler,
		},
		{
			MethodName: "HandleResponse",
			Handler:    _Member_HandleResponse_Handler,
		},
		{
			MethodName: "NewMember",
			Handler:    _Member_NewMember_Handler,
		},
		{
			MethodName: "DelMember",
			Handler:    _Member_DelMember_Handler,
		},
		{
			MethodName: "SessionClosed",
			Handler:    _Member_SessionClosed_Handler,
		},
		{
			MethodName: "CloseSession",
			Handler:    _Member_CloseSession_Handler,
		},
	},
	// Member defines no streaming RPCs.
	Streams: []grpc.StreamDesc{},
	// Metadata names the source .proto file this descriptor was generated from.
	Metadata: "cluster.proto",
}

@ -0,0 +1,95 @@
syntax = "proto3";
package clusterpb;
option go_package = "/clusterpb";
// MemberInfo identifies a cluster member: a display label, the address its
// gRPC service listens on, and the service names it registers.
message MemberInfo {
  string label = 1;
  string serviceAddr = 2;
  repeated string services = 3;
}

// RegisterRequest is sent by a member to the master when it joins.
message RegisterRequest {
  MemberInfo memberInfo = 1;
}

// RegisterResponse returns the current cluster membership to the joiner.
message RegisterResponse {
  repeated MemberInfo members = 1;
}

// UnregisterRequest removes the member listening at serviceAddr.
message UnregisterRequest {
  string serviceAddr = 1;
}

message UnregisterResponse {}

// Master is the membership service hosted by the master node.
service Master {
  rpc Register (RegisterRequest) returns (RegisterResponse) {}
  rpc Unregister (UnregisterRequest) returns (UnregisterResponse) {}
}
// RequestMessage forwards a client request (with message id) from a gate
// node to the member that serves the route.
message RequestMessage {
  string gateAddr = 1;
  int64 sessionId = 2;
  uint64 id = 3;
  string route = 4;
  bytes data = 5;
}

// NotifyMessage forwards a fire-and-forget client notify (no message id).
message NotifyMessage {
  string gateAddr = 1;
  int64 sessionId = 2;
  string route = 3;
  bytes data = 4;
}

// ResponseMessage carries a reply for a specific request id back to a session.
message ResponseMessage {
  int64 sessionId = 1;
  uint64 id = 2;
  bytes data = 3;
}

// PushMessage carries a server-initiated push to a session.
message PushMessage {
  int64 sessionId = 1;
  string route = 2;
  bytes data = 3;
}

message MemberHandleResponse {}

// NewMemberRequest announces a newly joined member to existing members.
message NewMemberRequest {
  MemberInfo memberInfo = 1;
}

message NewMemberResponse {}

// DelMemberRequest announces that the member at serviceAddr left the cluster.
message DelMemberRequest {
  string serviceAddr = 1;
}

message DelMemberResponse {}

// SessionClosedRequest notifies members that a gate session was closed.
message SessionClosedRequest {
  int64 sessionId = 1;
}

message SessionClosedResponse {}

// CloseSessionRequest asks the owning node to close a session.
message CloseSessionRequest {
  int64 sessionId = 1;
}

message CloseSessionResponse {}

// Member is the inter-node service every cluster member exposes.
service Member {
  rpc HandleRequest (RequestMessage) returns (MemberHandleResponse) {}
  rpc HandleNotify (NotifyMessage) returns (MemberHandleResponse) {}
  rpc HandlePush (PushMessage) returns (MemberHandleResponse) {}
  rpc HandleResponse (ResponseMessage) returns (MemberHandleResponse) {}
  rpc NewMember (NewMemberRequest) returns (NewMemberResponse) {}
  rpc DelMember (DelMemberRequest) returns (DelMemberResponse) {}
  rpc SessionClosed(SessionClosedRequest) returns(SessionClosedResponse) {}
  rpc CloseSession(CloseSessionRequest) returns(CloseSessionResponse) {}
}

@ -0,0 +1 @@
# Generate Go message types and gRPC stubs from every .proto in this directory,
# writing output one directory up; the require_unimplemented_servers=false flag
# lets servers implement the interface without embedding the Unimplemented type.
protoc --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative --go-grpc_opt=require_unimplemented_servers=false --go_out=.. --go-grpc_out=.. --proto_path=. *.proto

@ -0,0 +1,122 @@
package cluster
import (
"context"
"errors"
"google.golang.org/grpc"
"ngs/internal/env"
"sync"
"sync/atomic"
"time"
)
// connPool is a fixed-size set of gRPC client connections to one address,
// handed out round-robin via Get.
type connPool struct {
	index uint32 // round-robin cursor, advanced atomically by Get
	v     []*grpc.ClientConn
}

// rpcClient caches one connPool per remote service address.
type rpcClient struct {
	sync.RWMutex // guards isClosed and pools
	isClosed     bool
	pools        map[string]*connPool
}
// newConnArray builds a connPool holding maxSize connections dialed to addr.
// On any dial failure the partially-built pool is closed and the error is
// returned.
func newConnArray(maxSize uint, addr string) (*connPool, error) {
	pool := &connPool{v: make([]*grpc.ClientConn, maxSize)}
	if err := pool.init(addr); err != nil {
		return nil, err
	}
	return pool, nil
}
// init fills every slot with a connection dialed to addr, each attempt
// bounded by a 2-second timeout. On the first failure all connections opened
// so far are closed and the error is returned, leaving the pool unusable.
func (a *connPool) init(addr string) error {
	for i := range a.v {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		conn, err := grpc.DialContext(
			ctx,
			addr,
			env.GrpcOptions...,
		)
		// cancel immediately: DialContext has returned, the context is done with
		cancel()
		if err != nil {
			// Cleanup if the initialization fails.
			a.Close()
			return err
		}
		a.v[i] = conn
	}
	return nil
}
// Get returns the next connection in round-robin order; safe for concurrent use.
func (a *connPool) Get() *grpc.ClientConn {
	idx := atomic.AddUint32(&a.index, 1)
	return a.v[idx%uint32(len(a.v))]
}
// Close closes every open connection and nils its slot, making a second
// Close a no-op.
func (a *connPool) Close() {
	for i := range a.v {
		conn := a.v[i]
		if conn == nil {
			continue
		}
		// TODO: error handling
		_ = conn.Close()
		a.v[i] = nil
	}
}
// newRPCClient returns an rpcClient with an empty pool cache.
func newRPCClient() *rpcClient {
	client := new(rpcClient)
	client.pools = make(map[string]*connPool)
	return client
}
// getConnPool returns the pool for addr, lazily creating it on first use.
// It fails once the client has been closed via closePool.
func (c *rpcClient) getConnPool(addr string) (*connPool, error) {
	c.RLock()
	if c.isClosed {
		c.RUnlock()
		return nil, errors.New("rpc client is closed")
	}
	array, ok := c.pools[addr]
	c.RUnlock()
	if !ok {
		var err error
		// slow path: take the write lock and create (or adopt a racing
		// creator's) pool
		array, err = c.createConnPool(addr)
		if err != nil {
			return nil, err
		}
	}
	return array, nil
}
// createConnPool creates and caches the pool for addr under the write lock;
// if a concurrent caller created it first, that existing pool is returned.
func (c *rpcClient) createConnPool(addr string) (*connPool, error) {
	c.Lock()
	defer c.Unlock()
	array, ok := c.pools[addr]
	if !ok {
		var err error
		// TODO: make conn count configurable
		array, err = newConnArray(10, addr)
		if err != nil {
			return nil, err
		}
		c.pools[addr] = array
	}
	return array, nil
}
// closePool marks the client closed and closes every cached pool exactly once.
func (c *rpcClient) closePool() {
	c.Lock()
	defer c.Unlock()
	if c.isClosed {
		return
	}
	c.isClosed = true
	// close all connections
	for _, pool := range c.pools {
		pool.Close()
	}
}

@ -0,0 +1,10 @@
package cluster
import "errors"
// Errors that could occur during message handling.
var (
	// ErrSessionOnNotify: the session is in notify mode (no request id to answer).
	ErrSessionOnNotify = errors.New("current session working on notify mode")
	// ErrCloseClosedSession: Close was called on an already-closed session.
	ErrCloseClosedSession = errors.New("close closed session")
	// ErrInvalidRegisterReq: a cluster register request was malformed.
	ErrInvalidRegisterReq = errors.New("invalid register request")
)

@ -0,0 +1,445 @@
package cluster
import (
"context"
"encoding/json"
"fmt"
"github.com/gorilla/websocket"
"math/rand"
"net"
"ngs/cluster/clusterpb"
"ngs/component"
"ngs/internal/codec"
"ngs/internal/env"
"ngs/internal/log"
"ngs/internal/message"
"ngs/internal/packet"
"ngs/pipeline"
"ngs/scheduler"
"ngs/session"
"reflect"
"sort"
"strings"
"sync"
"time"
)
var (
// cached serialized data
hrd []byte // handshake response data
hbd []byte // heartbeat packet data
)
type rpcHandler func(session *session.Session, msg *message.Message, noCopy bool)
// cache pre-encodes the handshake response (hrd) and heartbeat (hbd) packets
// so every connection can reuse them; it panics on encoding failure since the
// server cannot operate without these packets.
func cache() {
	payload := map[string]interface{}{
		"code": 200,
		"sys":  map[string]float64{"heartbeat": env.Heartbeat.Seconds()},
	}
	data, err := json.Marshal(payload)
	if err != nil {
		panic(err)
	}

	if hrd, err = codec.Encode(packet.Handshake, data); err != nil {
		panic(err)
	}
	if hbd, err = codec.Encode(packet.Heartbeat, nil); err != nil {
		panic(err)
	}
}
// LocalHandler routes incoming messages either to handlers registered on this
// node or to remote cluster members that registered the target service.
type LocalHandler struct {
	localServices map[string]*component.Service // all registered service
	localHandlers map[string]*component.Handler // all handler method, keyed "Service.Method"

	mu             sync.RWMutex // guards remoteServices
	remoteServices map[string][]*clusterpb.MemberInfo

	pipeline    pipeline.Pipeline
	currentNode *Node
}
// NewHandler creates a LocalHandler bound to the given node and pipeline.
func NewHandler(currentNode *Node, pipeline pipeline.Pipeline) *LocalHandler {
	return &LocalHandler{
		localServices:  make(map[string]*component.Service),
		localHandlers:  make(map[string]*component.Handler),
		remoteServices: make(map[string][]*clusterpb.MemberInfo),
		pipeline:       pipeline,
		currentNode:    currentNode,
	}
}
// register wraps comp in a component.Service, extracts its handler methods,
// and indexes each under "<Service>.<Method>". Duplicate service names are
// rejected.
func (h *LocalHandler) register(comp component.Component, opts []component.Option) error {
	s := component.NewService(comp, opts)
	if _, ok := h.localServices[s.Name]; ok {
		return fmt.Errorf("handler: service already defined: %s", s.Name)
	}
	if err := s.ExtractHandler(); err != nil {
		return err
	}

	// register all localHandlers
	h.localServices[s.Name] = s
	for name, handler := range s.Handlers {
		n := fmt.Sprintf("%s.%s", s.Name, name)
		log.Println("Register local handler", n)
		h.localHandlers[n] = handler
	}
	return nil
}
// initRemoteService records the services exposed by every known member.
func (h *LocalHandler) initRemoteService(members []*clusterpb.MemberInfo) {
	for _, member := range members {
		h.addRemoteService(member)
	}
}
// addRemoteService indexes every service exposed by member under its name.
func (h *LocalHandler) addRemoteService(member *clusterpb.MemberInfo) {
	h.mu.Lock()
	defer h.mu.Unlock()

	for _, name := range member.Services {
		log.Println("Register remote service", name)
		h.remoteServices[name] = append(h.remoteServices[name], member)
	}
}
// delMember removes every service entry provided by the member at addr.
//
// Fix: the previous implementation removed elements from `members` while
// ranging over it, which shifts subsequent elements onto already-visited
// indices and skips the element right after each removal (and could drop the
// wrong entry when a member appears more than once). Rebuild the slice with
// an in-place filter instead.
func (h *LocalHandler) delMember(addr string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for name, members := range h.remoteServices {
		// in-place filter: keep members whose address differs from addr
		kept := members[:0]
		for _, member := range members {
			if member.ServiceAddr != addr {
				kept = append(kept, member)
			}
		}
		if len(kept) == 0 {
			delete(h.remoteServices, name)
		} else {
			h.remoteServices[name] = kept
		}
	}
}
// LocalService returns the sorted names of all locally registered services.
func (h *LocalHandler) LocalService() []string {
	var names []string
	for name := range h.localServices {
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}
// RemoteService returns the sorted names of all services known to be served
// by remote members.
func (h *LocalHandler) RemoteService() []string {
	h.mu.RLock()
	defer h.mu.RUnlock()

	var names []string
	for name := range h.remoteServices {
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}
// handle runs the read loop for a new client connection: it creates an agent,
// starts its write goroutine, decodes incoming bytes into packets and
// processes them until the connection errors, decoding fails, or a packet is
// rejected. On exit it notifies every remote member that the session closed.
//
// Fixes: (1) the old loop logged "Notify remote server success" BEFORE the
// SessionClosed RPC was even attempted, duplicating the real success log and
// reporting success on failing peers; (2) the decode-error branch duplicated
// the whole packet-processing loop — packets decoded before the error are now
// processed by the single shared loop before returning.
func (h *LocalHandler) handle(conn net.Conn) {
	// create a client agent and startup write goroutine
	agent := newAgent(conn, h.pipeline, h.remoteProcess)
	h.currentNode.storeSession(agent.session)

	// startup write goroutine
	go agent.write()

	if env.Debug {
		log.Println(fmt.Sprintf("New session established: %s", agent.String()))
	}

	// guarantee agent related resource be destroyed
	defer func() {
		request := &clusterpb.SessionClosedRequest{
			SessionId: agent.session.ID(),
		}
		for _, remote := range h.currentNode.cluster.remoteAddrs() {
			pool, err := h.currentNode.rpcClient.getConnPool(remote)
			if err != nil {
				log.Println("Cannot retrieve connection pool for address", remote, err)
				continue
			}
			client := clusterpb.NewMemberClient(pool.Get())
			if _, err = client.SessionClosed(context.Background(), request); err != nil {
				log.Println("Cannot closed session in remote address", remote, err)
				continue
			}
			if env.Debug {
				log.Println("Notify remote server success", remote)
			}
		}

		agent.Close()
		if env.Debug {
			log.Println(fmt.Sprintf("Session read goroutine exit, SessionID=%d, UID=%d", agent.session.ID(), agent.session.UID()))
		}
	}()

	// read loop
	buf := make([]byte, 2048)
	for {
		n, err := conn.Read(buf)
		if err != nil {
			log.Println(fmt.Sprintf("Read message error: %s, session will be closed immediately", err.Error()))
			return
		}

		// TODO(warning): decoder use slice for performance, packet data should be copy before next Decode
		packets, decodeErr := agent.decoder.Decode(buf[:n])
		if decodeErr != nil {
			log.Println(decodeErr.Error())
		}

		// process packets decoded so far — even on a decode error the decoder
		// may have produced complete packets before failing
		for _, p := range packets {
			if err := h.processPacket(agent, p); err != nil {
				log.Println(err.Error())
				return
			}
		}

		if decodeErr != nil {
			return
		}
	}
}
// processPacket advances the protocol state machine for one decoded packet:
// Handshake -> HandshakeAck -> Data, with heartbeats accepted at any point.
// Every packet refreshes the agent's last-activity timestamp. A returned
// error causes the caller to close the session.
func (h *LocalHandler) processPacket(agent *agent, p *packet.Packet) error {
	switch p.Type {
	case packet.Handshake:
		// validate the client's handshake payload, then reply with the cached
		// handshake response (hrd)
		if err := env.HandshakeValidator(p.Data); err != nil {
			return err
		}

		if _, err := agent.conn.Write(hrd); err != nil {
			return err
		}

		agent.setStatus(statusHandshake)
		if env.Debug {
			log.Println(fmt.Sprintf("Session handshake Id=%d, Remote=%s", agent.session.ID(), agent.conn.RemoteAddr()))
		}

	case packet.HandshakeAck:
		agent.setStatus(statusWorking)
		if env.Debug {
			log.Println(fmt.Sprintf("Receive handshake ACK Id=%d, Remote=%s", agent.session.ID(), agent.conn.RemoteAddr()))
		}

	case packet.Data:
		// data is only legal after the handshake has been acknowledged
		if agent.status() < statusWorking {
			return fmt.Errorf("receive data on socket which not yet ACK, session will be closed immediately, remote=%s",
				agent.conn.RemoteAddr().String())
		}

		msg, err := message.Decode(p.Data)
		if err != nil {
			return err
		}
		h.processMessage(agent, msg)

	case packet.Heartbeat:
		// expected — nothing to do; the lastAt update below is the point
	}

	agent.lastAt = time.Now().Unix()
	return nil
}
// findMembers returns the cluster members that expose the named service.
func (h *LocalHandler) findMembers(service string) []*clusterpb.MemberInfo {
	h.mu.RLock()
	members := h.remoteServices[service]
	h.mu.RUnlock()
	return members
}
// remoteProcess forwards a message whose route is not served locally to a
// remote member that registered the target service. noCopy signals that
// msg.Data may be aliased directly; otherwise the payload is cloned because
// the packet decoder reuses its buffer.
func (h *LocalHandler) remoteProcess(session *session.Session, msg *message.Message, noCopy bool) {
	index := strings.LastIndex(msg.Route, ".")
	if index < 0 {
		log.Println(fmt.Sprintf("ngs/handler: invalid route %s", msg.Route))
		return
	}

	service := msg.Route[:index]
	members := h.findMembers(service)
	if len(members) == 0 {
		log.Println(fmt.Sprintf("ngs/handler: %s not found(forgot registered?)", msg.Route))
		return
	}

	// Select a remote service address
	// 1. Use the service address directly if the router contains binding item
	// 2. Select a remote service address randomly and bind to router
	var remoteAddr string
	if addr, found := session.Router().Find(service); found {
		remoteAddr = addr
	} else {
		remoteAddr = members[rand.Intn(len(members))].ServiceAddr
		session.Router().Bind(service, remoteAddr)
	}

	pool, err := h.currentNode.rpcClient.getConnPool(remoteAddr)
	if err != nil {
		log.Println(err)
		return
	}

	// clone the payload unless the caller guarantees it is safe to alias
	var data = msg.Data
	if !noCopy && len(msg.Data) > 0 {
		data = make([]byte, len(msg.Data))
		copy(data, msg.Data)
	}

	// Retrieve gate address and session id: for sessions proxied through a
	// gate (acceptor), the originating gate's coordinates are forwarded
	// instead of this node's.
	gateAddr := h.currentNode.ServiceAddr
	sessionId := session.ID()
	switch v := session.NetworkEntity().(type) {
	case *acceptor:
		gateAddr = v.gateAddr
		sessionId = v.sid
	}

	client := clusterpb.NewMemberClient(pool.Get())
	switch msg.Type {
	case message.Request:
		request := &clusterpb.RequestMessage{
			GateAddr:  gateAddr,
			SessionId: sessionId,
			Id:        msg.ID,
			Route:     msg.Route,
			Data:      data,
		}
		_, err = client.HandleRequest(context.Background(), request)
	case message.Notify:
		request := &clusterpb.NotifyMessage{
			GateAddr:  gateAddr,
			SessionId: sessionId,
			Route:     msg.Route,
			Data:      data,
		}
		_, err = client.HandleNotify(context.Background(), request)
	}
	if err != nil {
		log.Println(fmt.Sprintf("Process remote message (%d:%s) error: %+v", msg.ID, msg.Route, err))
	}
}
// processMessage routes a decoded message to a local handler when one is
// registered for the route, otherwise forwards it to a remote member.
func (h *LocalHandler) processMessage(agent *agent, msg *message.Message) {
	var lastMid uint64
	switch msg.Type {
	case message.Request:
		lastMid = msg.ID
	case message.Notify:
		lastMid = 0
	default:
		log.Println("Invalid message type: " + msg.Type.String())
		return
	}

	if handler, found := h.localHandlers[msg.Route]; found {
		h.localProcess(handler, lastMid, agent.session, msg)
	} else {
		h.remoteProcess(agent.session, msg, false)
	}
}
// handleWS adapts a websocket connection to net.Conn and serves it on its own
// goroutine.
func (h *LocalHandler) handleWS(conn *websocket.Conn) {
	wrapped, err := newWSConn(conn)
	if err != nil {
		log.Println(err)
		return
	}
	go h.handle(wrapped)
}
// localProcess runs a locally registered handler for msg: it applies the
// inbound pipeline, deserializes the payload (unless the handler takes raw
// bytes), and dispatches the call either on the session's custom
// LocalScheduler or on the global scheduler.
//
// Fix: log prefixes said "nanl/handler" and misspelled "schedular" /
// "cannot found" — leftovers inconsistent with the "ngs/handler" prefix used
// elsewhere in this file and with the actual `scheduler` package name.
func (h *LocalHandler) localProcess(handler *component.Handler, lastMid uint64, session *session.Session, msg *message.Message) {
	if pipe := h.pipeline; pipe != nil {
		err := pipe.Inbound().Process(session, msg)
		if err != nil {
			log.Println("Pipeline process failed: " + err.Error())
			return
		}
	}

	var payload = msg.Data
	var data interface{}
	if handler.IsRawArg {
		data = payload
	} else {
		data = reflect.New(handler.Type.Elem()).Interface()
		err := env.Serializer.Unmarshal(payload, data)
		if err != nil {
			log.Println(fmt.Sprintf("Deserialize to %T failed: %+v (%v)", data, err, payload))
			return
		}
	}

	if env.Debug {
		log.Println(fmt.Sprintf("UID=%d, Message={%s}, Data=%+v", session.UID(), msg.String(), data))
	}

	args := []reflect.Value{handler.Receiver, reflect.ValueOf(session), reflect.ValueOf(data)}
	task := func() {
		// remember the last request id so a later Response targets it
		switch v := session.NetworkEntity().(type) {
		case *agent:
			v.lastMid = lastMid
		case *acceptor:
			v.lastMid = lastMid
		}

		result := handler.Method.Func.Call(args)
		if len(result) > 0 {
			if err := result[0].Interface(); err != nil {
				log.Println(fmt.Sprintf("Service %s error: %+v", msg.Route, err))
			}
		}
	}

	index := strings.LastIndex(msg.Route, ".")
	if index < 0 {
		log.Println(fmt.Sprintf("ngs/handler: invalid route %s", msg.Route))
		return
	}

	// A message can be dispatched to the global thread or a user-customized one
	service := msg.Route[:index]
	if s, found := h.localServices[service]; found && s.SchedulerName != "" {
		sched := session.Value(s.SchedulerName)
		if sched == nil {
			log.Println(fmt.Sprintf("ngs/handler: cannot find `scheduler.LocalScheduler` by %s", s.SchedulerName))
			return
		}

		local, ok := sched.(scheduler.LocalScheduler)
		if !ok {
			log.Println(fmt.Sprintf("ngs/handler: Type %T does not implement the `scheduler.LocalScheduler` interface",
				sched))
			return
		}
		local.Schedule(task)
	} else {
		scheduler.PushTask(task)
	}
}

@ -0,0 +1,12 @@
package cluster
import "ngs/cluster/clusterpb"
// Member is one entry in the cluster membership list.
type Member struct {
	isMaster   bool // true for the master node's own entry (set in initNode)
	memberInfo *clusterpb.MemberInfo
}

// MemberInfo returns the member's protobuf descriptor.
func (m *Member) MemberInfo() *clusterpb.MemberInfo {
	return m.memberInfo
}

@ -0,0 +1,393 @@
package cluster
import (
"context"
"errors"
"fmt"
"github.com/gorilla/websocket"
"google.golang.org/grpc"
"net"
"net/http"
"ngs/cluster/clusterpb"
"ngs/component"
"ngs/internal/env"
"ngs/internal/log"
"ngs/internal/message"
"ngs/pipeline"
"ngs/scheduler"
"ngs/session"
"strings"
"sync"
"time"
)
// Options contains some configurations for current node
type Options struct {
	Pipeline       pipeline.Pipeline     // optional inbound/outbound message pipeline
	IsMaster       bool                  // whether this node hosts the Master service
	AdvertiseAddr  string                // master address a non-master registers against
	RetryInterval  time.Duration         // delay between cluster register retries
	ClientAddr     string                // address for accepting client tcp/ws connections
	Components     *component.Components // components whose handlers this node serves
	Label          string                // human-readable member label
	IsWebsocket    bool                  // serve clients over websocket instead of raw tcp
	TSLCertificate string                // certificate file for wss; NOTE(review): "TSL" looks like a typo for TLS — kept for API compatibility
	TSLKey         string                // private key file for wss
}
// Node represents a node in ngs cluster, which will contains a group of services.
// All services will register to cluster and messages will be forwarded to the node
// which provides respective service
type Node struct {
	Options            // current node options
	ServiceAddr string // current server service address (RPC)

	cluster   *cluster
	handler   *LocalHandler
	server    *grpc.Server
	rpcClient *rpcClient

	mu       sync.RWMutex // guards sessions
	sessions map[int64]*session.Session
}
// Startup initializes the node: it builds the cluster and handler, registers
// all component handlers, pre-encodes handshake/heartbeat packets, joins the
// cluster (initNode), runs component Init/AfterInit hooks, and finally starts
// the client-facing listener (tcp, ws or wss) when ClientAddr is set.
func (n *Node) Startup() error {
	if n.ServiceAddr == "" {
		return errors.New("service address cannot be empty in master node")
	}

	n.sessions = map[int64]*session.Session{}
	n.cluster = newCluster(n)
	n.handler = NewHandler(n, n.Pipeline)
	components := n.Components.List()
	for _, c := range components {
		err := n.handler.register(c.Comp, c.Opts)
		if err != nil {
			return err
		}
	}

	cache()
	if err := n.initNode(); err != nil {
		return err
	}

	// Initialize all components
	for _, c := range components {
		c.Comp.Init()
	}
	for _, c := range components {
		c.Comp.AfterInit()
	}

	if n.ClientAddr != "" {
		// the listener loops block, so run them on their own goroutine
		go func() {
			if n.IsWebsocket {
				if len(n.TSLCertificate) != 0 {
					n.listenAndServeWSTLS()
				} else {
					n.listenAndServeWS()
				}
			} else {
				n.listenAndServe()
			}
		}()
	}
	return nil
}
// Handler returns the node's LocalHandler.
func (n *Node) Handler() *LocalHandler {
	return n.handler
}
// initNode starts the node's gRPC Member service and joins the cluster: a
// master node also serves the Master service and seeds the member list with
// itself; a non-master registers with the master at AdvertiseAddr, retrying
// every RetryInterval until it succeeds.
func (n *Node) initNode() error {
	// Current node is not master server and does not contains master
	// address, so running in singleton mode
	if !n.IsMaster && n.AdvertiseAddr == "" {
		return nil
	}

	listener, err := net.Listen("tcp", n.ServiceAddr)
	if err != nil {
		return err
	}

	// Initialize the gRPC server and register service
	n.server = grpc.NewServer()
	n.rpcClient = newRPCClient()
	clusterpb.RegisterMemberServer(n.server, n)

	go func() {
		err := n.server.Serve(listener)
		if err != nil {
			log.Fatalf("Start current node failed: %v", err)
		}
	}()

	if n.IsMaster {
		clusterpb.RegisterMasterServer(n.server, n.cluster)
		member := &Member{
			isMaster: true,
			memberInfo: &clusterpb.MemberInfo{
				Label:       n.Label,
				ServiceAddr: n.ServiceAddr,
				Services:    n.handler.LocalService(),
			},
		}
		n.cluster.members = append(n.cluster.members, member)
		n.cluster.setRpcClient(n.rpcClient)
	} else {
		pool, err := n.rpcClient.getConnPool(n.AdvertiseAddr)
		if err != nil {
			return err
		}
		client := clusterpb.NewMasterClient(pool.Get())
		request := &clusterpb.RegisterRequest{
			MemberInfo: &clusterpb.MemberInfo{
				Label:       n.Label,
				ServiceAddr: n.ServiceAddr,
				Services:    n.handler.LocalService(),
			},
		}
		// retry registration until the master accepts us
		for {
			resp, err := client.Register(context.Background(), request)
			if err == nil {
				n.handler.initRemoteService(resp.Members)
				n.cluster.initMembers(resp.Members)
				break
			}
			log.Println("Register current node to cluster failed", err, "and will retry in", n.RetryInterval.String())
			time.Sleep(n.RetryInterval)
		}
	}

	return nil
}
// Shutdown stops all components registered by the application, calling their
// hooks in reverse registration order, then unregisters from the master (for
// non-master members) and gracefully stops the gRPC server.
func (n *Node) Shutdown() {
	// reverse call `BeforeShutdown` hooks
	components := n.Components.List()
	length := len(components)
	for i := length - 1; i >= 0; i-- {
		components[i].Comp.BeforeShutdown()
	}

	// reverse call `Shutdown` hooks
	for i := length - 1; i >= 0; i-- {
		components[i].Comp.Shutdown()
	}

	if !n.IsMaster && n.AdvertiseAddr != "" {
		pool, err := n.rpcClient.getConnPool(n.AdvertiseAddr)
		if err != nil {
			log.Println("Retrieve master address error", err)
			goto EXIT
		}
		client := clusterpb.NewMasterClient(pool.Get())
		request := &clusterpb.UnregisterRequest{
			ServiceAddr: n.ServiceAddr,
		}
		_, err = client.Unregister(context.Background(), request)
		if err != nil {
			log.Println("Unregister current node failed", err)
			goto EXIT
		}
	}

EXIT:
	if n.server != nil {
		n.server.GracefulStop()
	}
}
// listenAndServe enables the current server to accept raw tcp client
// connections. It blocks, so callers run it on its own goroutine; accept
// errors are logged and the loop continues.
func (n *Node) listenAndServe() {
	listener, err := net.Listen("tcp", n.ClientAddr)
	if err != nil {
		log.Fatal(err.Error())
	}

	defer listener.Close()
	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Println(err.Error())
			continue
		}

		go n.handler.handle(conn)
	}
}
// listenAndServeWS accepts websocket client connections on env.WSPath,
// upgrading each HTTP request and handing the connection to the handler.
// It blocks; fatal on listener failure.
func (n *Node) listenAndServeWS() {
	var upgrader = websocket.Upgrader{
		ReadBufferSize:  1024,
		WriteBufferSize: 1024,
		CheckOrigin:     env.CheckOrigin,
	}

	http.HandleFunc("/"+strings.TrimPrefix(env.WSPath, "/"), func(w http.ResponseWriter, r *http.Request) {
		conn, err := upgrader.Upgrade(w, r, nil)
		if err != nil {
			log.Println(fmt.Sprintf("Upgrade failure, URI=%s, Error=%s", r.RequestURI, err.Error()))
			return
		}

		n.handler.handleWS(conn)
	})

	if err := http.ListenAndServe(n.ClientAddr, nil); err != nil {
		log.Fatal(err.Error())
	}
}
// listenAndServeWSTLS is listenAndServeWS over TLS, using the configured
// certificate and key files. It blocks; fatal on listener failure.
func (n *Node) listenAndServeWSTLS() {
	var upgrader = websocket.Upgrader{
		ReadBufferSize:  1024,
		WriteBufferSize: 1024,
		CheckOrigin:     env.CheckOrigin,
	}

	http.HandleFunc("/"+strings.TrimPrefix(env.WSPath, "/"), func(w http.ResponseWriter, r *http.Request) {
		conn, err := upgrader.Upgrade(w, r, nil)
		if err != nil {
			log.Println(fmt.Sprintf("Upgrade failure, URI=%s, Error=%s", r.RequestURI, err.Error()))
			return
		}

		n.handler.handleWS(conn)
	})

	if err := http.ListenAndServeTLS(n.ClientAddr, n.TSLCertificate, n.TSLKey, nil); err != nil {
		log.Fatal(err.Error())
	}
}
// storeSession registers s in the node's session table keyed by session id.
func (n *Node) storeSession(s *session.Session) {
	n.mu.Lock()
	defer n.mu.Unlock()
	n.sessions[s.ID()] = s
}
// findSession returns the session with the given id, or nil if unknown.
func (n *Node) findSession(sid int64) *session.Session {
	n.mu.RLock()
	defer n.mu.RUnlock()
	return n.sessions[sid]
}
// findOrCreateSession returns the session identified by sid, creating a
// remote-backed acceptor session on first sight.
//
// Fix: the old code checked the table only under the read lock, so two
// concurrent callers for the same sid could each create a session and the
// second write silently replaced the first, leaving a caller holding an
// orphaned session. The creation path now re-checks under the write lock and
// adopts the winner's session if one raced in.
func (n *Node) findOrCreateSession(sid int64, gateAddr string) (*session.Session, error) {
	n.mu.RLock()
	s, found := n.sessions[sid]
	n.mu.RUnlock()
	if found {
		return s, nil
	}

	conns, err := n.rpcClient.getConnPool(gateAddr)
	if err != nil {
		return nil, err
	}
	ac := &acceptor{
		sid:        sid,
		gateClient: clusterpb.NewMemberClient(conns.Get()),
		rpcHandler: n.handler.remoteProcess,
		gateAddr:   gateAddr,
	}
	s = session.New(ac)
	ac.session = s

	n.mu.Lock()
	defer n.mu.Unlock()
	if existing, ok := n.sessions[sid]; ok {
		// another goroutine created the session first; use theirs
		return existing, nil
	}
	n.sessions[sid] = s
	return s, nil
}
// HandleRequest implements the MemberServer interface: it dispatches a
// request forwarded by another node to the local handler registered for
// req.Route, materializing a proxy session for the originating gate.
func (n *Node) HandleRequest(_ context.Context, req *clusterpb.RequestMessage) (*clusterpb.MemberHandleResponse, error) {
	handler, found := n.handler.localHandlers[req.Route]
	if !found {
		return nil, fmt.Errorf("service not found in current node: %v", req.Route)
	}
	s, err := n.findOrCreateSession(req.SessionId, req.GateAddr)
	if err != nil {
		return nil, err
	}
	msg := &message.Message{
		Type:  message.Request,
		ID:    req.Id,
		Route: req.Route,
		Data:  req.Data,
	}
	n.handler.localProcess(handler, req.Id, s, msg)
	return &clusterpb.MemberHandleResponse{}, nil
}
// HandleNotify implements the MemberServer interface: like HandleRequest but
// for fire-and-forget notifies, so no message id is tracked (lastMid = 0).
func (n *Node) HandleNotify(_ context.Context, req *clusterpb.NotifyMessage) (*clusterpb.MemberHandleResponse, error) {
	handler, found := n.handler.localHandlers[req.Route]
	if !found {
		return nil, fmt.Errorf("service not found in current node: %v", req.Route)
	}
	s, err := n.findOrCreateSession(req.SessionId, req.GateAddr)
	if err != nil {
		return nil, err
	}
	msg := &message.Message{
		Type:  message.Notify,
		Route: req.Route,
		Data:  req.Data,
	}
	n.handler.localProcess(handler, 0, s, msg)
	return &clusterpb.MemberHandleResponse{}, nil
}
// HandlePush implements the MemberServer interface: it pushes data to the
// target session's client.
func (n *Node) HandlePush(_ context.Context, req *clusterpb.PushMessage) (*clusterpb.MemberHandleResponse, error) {
	s := n.findSession(req.SessionId)
	if s == nil {
		return &clusterpb.MemberHandleResponse{}, fmt.Errorf("session not found: %v", req.SessionId)
	}
	err := s.Push(req.Route, req.Data)
	return &clusterpb.MemberHandleResponse{}, err
}
// HandleResponse implements the MemberServer interface: it delivers a reply
// for message id req.Id to the target session.
func (n *Node) HandleResponse(_ context.Context, req *clusterpb.ResponseMessage) (*clusterpb.MemberHandleResponse, error) {
	s := n.findSession(req.SessionId)
	if s == nil {
		return &clusterpb.MemberHandleResponse{}, fmt.Errorf("session not found: %v", req.SessionId)
	}
	err := s.ResponseMID(req.Id, req.Data)
	return &clusterpb.MemberHandleResponse{}, err
}
// NewMember implements the MemberServer interface: it records a newly joined
// cluster member and its services.
func (n *Node) NewMember(_ context.Context, req *clusterpb.NewMemberRequest) (*clusterpb.NewMemberResponse, error) {
	info := req.MemberInfo
	n.handler.addRemoteService(info)
	n.cluster.addMember(info)
	return &clusterpb.NewMemberResponse{}, nil
}
// DelMember implements the MemberServer interface: it forgets a departed
// cluster member and its services.
func (n *Node) DelMember(_ context.Context, req *clusterpb.DelMemberRequest) (*clusterpb.DelMemberResponse, error) {
	addr := req.ServiceAddr
	n.handler.delMember(addr)
	n.cluster.delMember(addr)
	return &clusterpb.DelMemberResponse{}, nil
}
// SessionClosed implements the MemberServer interface: it drops the local
// entry for the closed gate session and, if one existed, fires the session
// lifetime-close callbacks on the scheduler.
func (n *Node) SessionClosed(_ context.Context, req *clusterpb.SessionClosedRequest) (*clusterpb.SessionClosedResponse, error) {
	n.mu.Lock()
	s, found := n.sessions[req.SessionId]
	delete(n.sessions, req.SessionId)
	n.mu.Unlock()
	if found {
		scheduler.PushTask(func() { session.Lifetime.Close(s) })
	}
	return &clusterpb.SessionClosedResponse{}, nil
}
// CloseSession implements the MemberServer interface: it removes the session
// from the local table and closes it if it was present.
func (n *Node) CloseSession(_ context.Context, req *clusterpb.CloseSessionRequest) (*clusterpb.CloseSessionResponse, error) {
	n.mu.Lock()
	s, found := n.sessions[req.SessionId]
	delete(n.sessions, req.SessionId)
	n.mu.Unlock()
	if found {
		s.Close()
	}
	return &clusterpb.CloseSessionResponse{}, nil
}

@ -0,0 +1,9 @@
package cluster
// Agent lifecycle statuses. The zero iota slot is discarded so values start
// at 1; processPacket moves an agent from statusHandshake to statusWorking
// and rejects data packets below statusWorking.
const (
	_           int32 = iota
	statusStart       // 1
	statusHandshake   // handshake response sent, awaiting client ack
	statusWorking     // handshake acked; data packets accepted
	statusClosed      // NOTE(review): presumably set when the agent closes — setter is outside this chunk
)

@ -0,0 +1,116 @@
package cluster
import (
"github.com/gorilla/websocket"
"io"
"net"
"time"
)
// wsConn is an adapter to net.Conn, which implements all net.Conn
// interface base on *websocket.Conn
type wsConn struct {
	conn   *websocket.Conn
	typ    int       // message type of the first frame, as reported by NextReader
	reader io.Reader // reader for the current websocket frame
}
// newWSConn returns an initialized *wsConn, priming it with the connection's
// first websocket frame so Read has something to consume.
func newWSConn(conn *websocket.Conn) (*wsConn, error) {
	t, r, err := conn.NextReader()
	if err != nil {
		return nil, err
	}
	return &wsConn{conn: conn, typ: t, reader: r}, nil
}
// Read reads data from the current websocket frame into b. When the frame is
// exhausted (io.EOF from the frame reader) it advances to the next frame and
// returns whatever was read with a nil error — callers may therefore observe
// (0, nil) at frame boundaries. NOTE(review): io.EOF is never surfaced to the
// caller; termination relies on NextReader returning a connection error.
func (c *wsConn) Read(b []byte) (int, error) {
	n, err := c.reader.Read(b)
	if err != nil && err != io.EOF {
		return n, err
	} else if err == io.EOF {
		// current frame drained: swap in a reader for the next frame
		_, r, err := c.conn.NextReader()
		if err != nil {
			return 0, err
		}
		c.reader = r
	}

	return n, nil
}
// Write sends b as one binary websocket message. On success it reports
// len(b), since a websocket message write is all-or-nothing.
func (c *wsConn) Write(b []byte) (int, error) {
	if err := c.conn.WriteMessage(websocket.BinaryMessage, b); err != nil {
		return 0, err
	}
	return len(b), nil
}
// Close closes the connection.
// Any blocked Read or Write operations will be unblocked and return errors.
func (c *wsConn) Close() error {
	return c.conn.Close()
}
// LocalAddr returns the local network address.
func (c *wsConn) LocalAddr() net.Addr {
	return c.conn.LocalAddr()
}

// RemoteAddr returns the remote network address.
func (c *wsConn) RemoteAddr() net.Addr {
	return c.conn.RemoteAddr()
}
// SetDeadline sets both the read and the write deadline of the underlying
// websocket connection to t, equivalent to calling SetReadDeadline followed
// by SetWriteDeadline. A zero t disables timeouts; see the net.Conn
// documentation for full deadline semantics.
func (c *wsConn) SetDeadline(t time.Time) error {
	if err := c.conn.SetReadDeadline(t); err != nil {
		return err
	}
	return c.conn.SetWriteDeadline(t)
}
// SetReadDeadline sets the deadline for future Read calls
// and any currently-blocked Read call.
// A zero value for t means Read will not time out.
func (c *wsConn) SetReadDeadline(t time.Time) error {
	return c.conn.SetReadDeadline(t)
}
// SetWriteDeadline sets the deadline for future Write calls
// and any currently-blocked Write call.
// Even if write times out, it may return n > 0, indicating that
// some of the data was successfully written.
// A zero value for t means Write will not time out.
func (c *wsConn) SetWriteDeadline(t time.Time) error {
	return c.conn.SetWriteDeadline(t)
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 21 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

@ -0,0 +1,160 @@
# 协议格式
`ngs`的二进制协议包含两层编码package和message。message层主要实现route压缩和protobuf压缩message
层的编码结果将传递给package层。package层主要实现`ngs`应用基于二进制协议的握手过程,心跳和数据传输编码,
package层的编码结果可以通过tcpwebsocket等协议以二进制数据的形式进行传输。message层编码可选也可替换
成其他二进制编码格式都不影响package层编码和发送。
`ngs`协议层的结构如下图所示:
![Ngs Protocol](images/data-trans.png)
## Ngs Package
package协议主要用来封装在面向连接的二进制流的通讯协议tcp上的`ngs`数据包。package分为控制包和数
据包两种类型。前者用来实现`ngs`应用层面的控制流程,包括客户端和服务器的握手,心跳和服务器主动断开连接的通
知等控制信息。后者则是用来在客户端和服务器之间传输应用数据。
#### Package Format
package分为header和body两部分。header描述package包的类型和包的长度body则是需要传输的数据内容。具体
格式如下:
![Ngs package](images/packet-format.png)
* type - package类型1个byte取值如下。
- 0x01: 客户端到服务器的握手请求以及服务器到客户端的握手响应
- 0x02: 客户端到服务器的握手ack
- 0x03: 心跳包
- 0x04: 数据包
- 0x05: 服务器主动断开连接通知
* length - body内容长度3个byte的大端整数因此最大的包长度为2^24个byte。
* body - 二进制的传输内容。
各个package类型的具体描述和控制流程如下。
#### 握手(Handshake Package)
握手流程主要提供一个机会,让客户端和服务器在连接建立后,进行一些初始化的数据交换。交换的数据分为系统和用
户两部分。系统部分为`Ngs`框架所需信息,用户部分则是用户可以在具体应用中自定义的内容。
握手的内容为utf-8编码的json字符串不压缩通过body字段传输。
握手请求:
```json
{
"sys": {
"version": "1.1.1",
"type": "js-websocket"
},
"user": {
// Any customized request data
}
}
```
* sys.version - 客户端的版本号。每个客户端SDK的每一个版本都有一个固定的版本号。在握手阶段客户端将该版本
号上传给服务器,服务器可以由此来判断当前客户端是否合适与服务器通讯。
* sys.type - 客户端的类型。可以通过客户端类型和版本号一起来确定客户端是否合适。
握手响应:
```javascript
{
"code": 200, // response code
"sys": {
"heartbeat": 3, // heartbeat interval in second
"dict": {}, // route dictionary
},
"user": {
// Any customized response data
}
}
```
* code - 握手响应的状态码。目前的取值200代表成功500为处理用户自定义握手流程时失败501为客户端版
本号不符合要求。
* sys.heartbeat - 可选,心跳时间间隔,单位为秒,没指定表示不需要心跳。
* sys.dict - 可选route字段压缩的映射表没指定表示没有字典压缩。
* sys.protos - 可选protobuf压缩的数据定义没有表示没有protobuf压缩。
* user - 可选,用户自定义的握手数据,没有表示没有用户自定义的握手数据。
握手的流程如下:
![Ngs handshake](images/handshake.png)
当底层连接建立后客户端向服务器发起握手请求并附带必要的数据。服务器检验握手数据后返回握手响应。如果握手成功客户端向服务器发送一个握手ack握手阶段至此成功结束。
#### 心跳(Heartbeat Package)
心跳包的length字段为0body为空。
心跳的流程如下:
![Ngs heartbeat](images/heartbeat.png)
服务器可以配置心跳时间间隔。当握手结束后,客户端发起第一个心跳。服务器和客户端收到心跳包后,延迟心跳间隔的时间后再向对方发送一个心跳包。
心跳超时时间为2倍的心跳间隔时间。服务器检测到心跳超时并不会主动断开客户端的连接。客户端检测到心跳超时可以根据策略选择是否要主动断开连接。
#### 数据
数据包用来在客户端和服务器之间传输数据所用。数据包的body是由上层传下来的任意二进制数据package层不会对body内容做任何处理。
#### 服务器主动断开
当服务器主动断开客户端连接时(如:踢掉某个在线玩家),会先向客户端发送一个控制消息,然后再断开连接。客户端可以通过这个消息来判断是否是服务器主动断开连接的。
## Ngs Message
message协议的主要作用是封装消息头包括route和消息类型两部分不同的消息类型有着不同的消息头在消息头里面可能要打入message id(即requestId)和route信息。由于可能会有route压缩而且对于服务端push的消息
message id为空对于客户端请求的响应route为空因此message的头格式比较复杂。
消息头分为三部分flagmessage idroute。如下图所示
![Message Head](images/message-header.png)
从上图可以看出,`Ngs`消息头是可变的,会根据具体的消息类型和内容而改变。其中:
* flag位是必须的占用一个byte它决定了后面的消息类型和内容的格式;
* message id和route则是可选的。其中message id采用[varints 128变长编码](https://developers.google.com/protocol-buffers/docs/encoding#varints)方式,根据值的大小,长度在0~5 byte之间。route则根据消息类型以及内容的大小,长度在0~255 byte之间。
### 标志位(flag)
flag占用message头的第一个byte其内容如下
![flag](images/message-flag.png)
现在只用到了其中的4个bit这四个bit包括两部分占用3个bit的message type字段和占用1个bit的route标识其中
* message type用来标识消息类型,范围为0~7。现在消息共有四类:request、notify、response、push,值的范围
是0~3。不同的消息类型有着不同的消息内容,下面会有详细分析。
* 最后一位的route表示route是否压缩影响route字段的长度。
这两部分之间相互独立,互不影响。
### 消息类型(Message Type)
不同类型的消息对应不同消息头消息类型通过flag字段的第2-4位来确定其对应关系以及相应的消息头如下图
![Message Head Content](images/message-type.png)
上面的 **-** 表示不影响消息类型的bit位。
### 路由压缩标志(Route Compression Flag)
route主要分为压缩和未压缩两种由flag的最后一位route压缩标志位指定当flag中的route标志为0时表示未压缩的route为1则表示是压缩route。route通过系统生成和用户自定义的字典进行压缩具体内容见[压缩协议](./route_compression_zh_CN.md)。
route字段的编码会依赖flag的这一位其格式如下图:
![Message Type](images/route-compre.png)
上图是不同的flag标志对应的route字段的内容
* flag的最后一位为1时后面跟的是一个uInt16表示的route字典编号需要通过查询字典来获取route;
* flag最后一位为0时,后面route则由一个uInt8的byte用来表示route的字节长度。之后是通过utf8编码后的route字
符串,其长度就是前面一位byte的uInt8的值,因此route的长度最大支持255B。
## Summary
在本部分,介绍了`ngs`的协议包括package层和message层。当用户使用`ngs`作为网络层库的时候,可以根据这里提供的协议信息,在客户端可以依据此协议完成与服务端的通信。
***Copyright***:以上的部分内容与图表来自于[Pomelo Protocol](https://github.com/NetEase/pomelo/wiki/Communication-Protocol)

Binary file not shown.

@ -0,0 +1,11 @@
package ngs

import "errors"

// Errors that could be occurred during message handling.
var (
	// ErrCloseClosedGroup is returned by Group.Close on an already-closed group.
	ErrCloseClosedGroup = errors.New("close closed group")
	// ErrClosedGroup is returned by group operations after the group was closed.
	ErrClosedGroup = errors.New("group closed")
	// ErrMemberNotFound is returned when no session with the given UID exists.
	ErrMemberNotFound = errors.New("member not found in the group")
	// ErrSessionDuplication is returned when a session id is added twice.
	ErrSessionDuplication = errors.New("session has existed in the current group")
)

@ -1,3 +1,17 @@
module ng module ngs
go 1.18 go 1.18
require (
github.com/gorilla/websocket v1.5.0
google.golang.org/grpc v1.45.0
google.golang.org/protobuf v1.28.0
)
require (
github.com/golang/protobuf v1.5.2 // indirect
golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2 // indirect
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
golang.org/x/text v0.3.7 // indirect
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 // indirect
)

135
go.sum

@ -0,0 +1,135 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2 h1:6mzvA99KwZxbOrxww4EvWVQUnN1+xEu9tafK5ZxkYeA=
golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 h1:myaecH64R0bIEDjNORIel4iXubqzaHU1K2z8ajBwWcM=
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

@ -0,0 +1,209 @@
package ngs
import (
"fmt"
"ngs/internal/env"
"ngs/internal/log"
"ngs/internal/message"
"ngs/session"
"sync"
"sync/atomic"
)
const (
	// groupStatusWorking marks a group that accepts members and messages.
	groupStatusWorking = 0
	// groupStatusClosed marks a group that has been closed; operations fail afterwards.
	groupStatusClosed = 1
)

// SessionFilter represents a filter which was used to filter session when Multicast,
// the session will receive the message while filter returns true.
type SessionFilter func(*session.Session) bool

// Group represents a session group which used to manage a number of
// sessions, data send to the group will send to all session in it.
type Group struct {
	mu       sync.RWMutex               // guards sessions
	status   int32                      // channel current status, read/written atomically
	name     string                     // channel name
	sessions map[int64]*session.Session // session id map to session instance
}
// NewGroup creates a working group with the given name and an empty
// session table.
func NewGroup(n string) *Group {
	g := &Group{}
	g.status = groupStatusWorking
	g.name = n
	g.sessions = map[int64]*session.Session{}
	return g
}
// Member looks up the session bound to the given UID, returning
// ErrMemberNotFound when no session in the group carries that UID.
// Note: sessions are keyed by session id, so this is a linear scan.
func (c *Group) Member(uid int64) (*session.Session, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	for _, sess := range c.sessions {
		if sess.UID() != uid {
			continue
		}
		return sess, nil
	}
	return nil, ErrMemberNotFound
}
// Members returns all member's UID in current group.
// An empty group yields nil, preserving the original behavior.
func (c *Group) Members() []int64 {
	c.mu.RLock()
	defer c.mu.RUnlock()

	if len(c.sessions) == 0 {
		return nil
	}
	// Pre-size the result to avoid repeated append growth.
	members := make([]int64, 0, len(c.sessions))
	for _, s := range c.sessions {
		members = append(members, s.UID())
	}
	return members
}
// Multicast push the message to the filtered clients: v is serialized once
// and pushed to every session for which filter returns true. Per-session
// push failures are logged and do not abort the multicast.
func (c *Group) Multicast(route string, v interface{}, filter SessionFilter) error {
	if c.isClosed() {
		return ErrClosedGroup
	}

	data, err := message.Serialize(v)
	if err != nil {
		return err
	}

	if env.Debug {
		log.Println(fmt.Sprintf("Multicast %s, Data=%+v", route, v))
	}

	c.mu.RLock()
	defer c.mu.RUnlock()

	for _, sess := range c.sessions {
		if filter(sess) {
			if pushErr := sess.Push(route, data); pushErr != nil {
				log.Println(pushErr.Error())
			}
		}
	}
	return nil
}
// Broadcast push the message(s) to all members.
//
// Per-session push failures are logged and do not abort the broadcast;
// once serialization succeeds the function returns nil, consistent with
// Multicast.
func (c *Group) Broadcast(route string, v interface{}) error {
	if c.isClosed() {
		return ErrClosedGroup
	}

	data, err := message.Serialize(v)
	if err != nil {
		return err
	}

	if env.Debug {
		log.Println(fmt.Sprintf("Broadcast %s, Data=%+v", route, v))
	}

	c.mu.RLock()
	defer c.mu.RUnlock()

	for _, s := range c.sessions {
		if err = s.Push(route, data); err != nil {
			log.Println(fmt.Sprintf("Session push message error, ID=%d, UID=%d, Error=%s", s.ID(), s.UID(), err.Error()))
		}
	}
	// Fix: previously this returned whatever error the *last* session's Push
	// produced even though it was already logged and all other sessions were
	// still served — an arbitrary, order-dependent result for callers.
	return nil
}
// Contains reports whether a session bound to the given UID is a member
// of the group.
func (c *Group) Contains(uid int64) bool {
	if _, err := c.Member(uid); err != nil {
		return false
	}
	return true
}
// Add add session to group. It returns ErrClosedGroup on a closed group and
// ErrSessionDuplication when a session with the same id is already a member.
func (c *Group) Add(s *session.Session) error {
	if c.isClosed() {
		return ErrClosedGroup
	}

	if env.Debug {
		log.Println(fmt.Sprintf("Add session to group %s, ID=%d, UID=%d", c.name, s.ID(), s.UID()))
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	// Fix: the parameter was previously named `session`, shadowing the
	// imported package of the same name, and the id was computed twice
	// (once via the local and once via a second ID() call).
	id := s.ID()
	if _, ok := c.sessions[id]; ok {
		return ErrSessionDuplication
	}
	c.sessions[id] = s
	return nil
}
// Leave removes the given session from the group (keyed by its session id).
// Removing a session that is not a member is a no-op.
func (c *Group) Leave(s *session.Session) error {
	if c.isClosed() {
		return ErrClosedGroup
	}

	if env.Debug {
		log.Println(fmt.Sprintf("Remove session from group %s, UID=%d", c.name, s.UID()))
	}

	c.mu.Lock()
	delete(c.sessions, s.ID())
	c.mu.Unlock()
	return nil
}
// LeaveAll drops every session from the group by swapping in a fresh,
// empty session table.
func (c *Group) LeaveAll() error {
	if c.isClosed() {
		return ErrClosedGroup
	}

	c.mu.Lock()
	c.sessions = map[int64]*session.Session{}
	c.mu.Unlock()
	return nil
}
// Count reports how many sessions are currently in the group.
func (c *Group) Count() int {
	c.mu.RLock()
	n := len(c.sessions)
	c.mu.RUnlock()
	return n
}
// isClosed reports whether the group has been closed.
func (c *Group) isClosed() bool {
	// Simplified from an if/return-true/return-false chain (staticcheck S1008).
	return atomic.LoadInt32(&c.status) == groupStatusClosed
}
// Close destroy group, which will release all resource in the group.
// It returns ErrCloseClosedGroup when the group is already closed.
func (c *Group) Close() error {
	if c.isClosed() {
		return ErrCloseClosedGroup
	}
	atomic.StoreInt32(&c.status, groupStatusClosed)

	// Fix: the session map was previously replaced without holding the write
	// lock, racing with Broadcast/Multicast/Member, which read c.sessions
	// under RLock.
	c.mu.Lock()
	c.sessions = make(map[int64]*session.Session)
	c.mu.Unlock()
	return nil
}

@ -0,0 +1,41 @@
package ngs
import (
"math/rand"
"ngs/session"
"testing"
)
// TestChannel_Add concurrently adds distinct sessions and verifies the
// group's bookkeeping via Count, Contains and LeaveAll.
func TestChannel_Add(t *testing.T) {
	c := NewGroup("test_add")
	var paraCount = 100
	w := make(chan bool, paraCount)
	for i := 0; i < paraCount; i++ {
		go func(id int) {
			s := session.New(nil)
			s.Bind(int64(id + 1))
			// Fix: the Add error was silently discarded; a failure here would
			// surface later as a confusing Count mismatch. t.Error is safe
			// from a goroutine (t.Fatal is not).
			if err := c.Add(s); err != nil {
				t.Error(err)
			}
			w <- true
		}(i)
	}
	// Wait for all adders to finish.
	for i := 0; i < paraCount; i++ {
		<-w
	}

	if c.Count() != paraCount {
		t.Fatalf("count expect: %d, got: %d", paraCount, c.Count())
	}

	// Any of the bound UIDs [1, paraCount] must be a member.
	n := rand.Int63n(int64(paraCount)) + 1
	if !c.Contains(n) {
		t.Fail()
	}

	// leave
	if err := c.LeaveAll(); err != nil {
		t.Fatal(err)
	}
	if c.Count() != 0 {
		t.Fail()
	}
}

@ -0,0 +1,111 @@
package ngs
import (
"fmt"
"ngs/cluster"
"ngs/component"
"ngs/internal/env"
"ngs/internal/log"
"ngs/internal/runtime"
"ngs/scheduler"
"os"
"os/signal"
"path/filepath"
"strings"
"sync/atomic"
"syscall"
"time"
)
// running is 1 while a server started via Listen is active; it is
// manipulated atomically to guard against starting ngs twice.
var running int32

// VERSION returns current ngs version
var VERSION = "0.1.0"

var (
	// app represents the current server process
	app = &struct {
		name    string    // current application name
		startAt time.Time // startup time
	}{}
)
// Listen listens on the TCP network address addr
// and then calls Serve with handler to handle requests
// on incoming connections. It blocks until the process receives a
// termination signal or Shutdown is called, then tears the node down.
func Listen(addr string, opts ...Option) {
	if atomic.AddInt32(&running, 1) != 1 {
		log.Println("Ngs has running")
		return
	}

	// application initialize
	app.name = strings.TrimLeft(filepath.Base(os.Args[0]), "/")
	app.startAt = time.Now()

	// environment initialize
	if wd, err := os.Getwd(); err != nil {
		panic(err)
	} else {
		env.Wd, _ = filepath.Abs(wd)
	}

	opt := cluster.Options{
		Components: &component.Components{},
	}
	for _, option := range opts {
		option(&opt)
	}

	// Use listen address as client address in non-cluster mode
	if !opt.IsMaster && opt.AdvertiseAddr == "" && opt.ClientAddr == "" {
		log.Println("The current server running in singleton mode")
		opt.ClientAddr = addr
	}

	// Set the retry interval to 3 seconds if it wasn't set by the user
	if opt.RetryInterval == 0 {
		opt.RetryInterval = time.Second * 3
	}

	node := &cluster.Node{
		Options:     opt,
		ServiceAddr: addr,
	}
	err := node.Startup()
	if err != nil {
		log.Fatalf("Node startup failed: %v", err)
	}
	runtime.CurrentNode = node

	if node.ClientAddr != "" {
		log.Println(fmt.Sprintf("Startup *Ngs gate server* %s, client address: %v, service address: %s",
			app.name, node.ClientAddr, node.ServiceAddr))
	} else {
		log.Println(fmt.Sprintf("Startup *Ngs backend server* %s, service address %s",
			app.name, node.ServiceAddr))
	}

	go scheduler.Schedule()

	// Fix: signal.Notify requires a buffered channel (`go vet` flags the
	// unbuffered form) or a signal delivered while the select is not yet
	// running can be dropped. SIGKILL is removed from the list because it
	// can never be caught or handled.
	sg := make(chan os.Signal, 1)
	signal.Notify(sg, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)

	select {
	case <-env.Die:
		log.Println("The app will shutdown in a few seconds")
	case s := <-sg:
		log.Println("Ngs server got signal", s)
	}

	log.Println("Ngs server is stopping...")

	node.Shutdown()
	runtime.CurrentNode = nil
	scheduler.Close()
	atomic.StoreInt32(&running, 0)
}
// Shutdown send a signal to let 'ngs' shutdown itself.
// It closes env.Die, which unblocks the select inside Listen and starts the
// normal teardown path. Calling it more than once panics (double close of a
// channel), so call it exactly once.
func Shutdown() {
	close(env.Die)
}

@ -3,7 +3,7 @@ package codec
import ( import (
"bytes" "bytes"
"errors" "errors"
"ng/internal/packet" "ngs/internal/packet"
) )
// Codec constants. // Codec constants.
@ -84,7 +84,7 @@ func (c *Decoder) Decode(data []byte) ([]*packet.Packet, error) {
return packets, nil return packets, nil
} }
// Encode create a packet.Packet from the raw bytes slice and then encode to network bytes slice // Encode 从原始raw bytes创建一个用于网络传输的 packet.Packet 结构,参考网易 pomelo 协议
// Protocol refs: https://github.com/NetEase/pomelo/wiki/Communication-Protocol // Protocol refs: https://github.com/NetEase/pomelo/wiki/Communication-Protocol
// //
// -<type>-|--------<length>--------|-<data>- // -<type>-|--------<length>--------|-<data>-
@ -97,7 +97,7 @@ func Encode(typ packet.Type, data []byte) ([]byte, error) {
p := &packet.Packet{Type: typ, Length: len(data)} p := &packet.Packet{Type: typ, Length: len(data)}
buf := make([]byte, p.Length+HeadLength) buf := make([]byte, p.Length+HeadLength)
buf[0] = byte(p.Type) buf[0] = byte(p.Type) // 编译器提示,此处 byte 转换不能删
copy(buf[1:HeadLength], intToBytes(p.Length)) copy(buf[1:HeadLength], intToBytes(p.Length))
copy(buf[HeadLength:], data) copy(buf[HeadLength:], data)

@ -1,10 +1,78 @@
package codec package codec
import ( import (
. "ng/internal/packet" . "ngs/internal/packet"
"reflect"
"testing" "testing"
) )
// TestPack round-trips packets through Encode/Decode, checks that invalid
// packet types are rejected, and that trailing partial bytes after a full
// packet do not corrupt the decoded packet.
func TestPack(t *testing.T) {
	data := []byte("hello world")

	p1 := &Packet{Type: Handshake, Data: data, Length: len(data)}
	pp1, err := Encode(Handshake, data)
	if err != nil {
		t.Error(err.Error())
	}
	d1 := NewDecoder()
	packets, err := d1.Decode(pp1)
	if err != nil {
		t.Fatal(err.Error())
	}
	if len(packets) < 1 {
		t.Fatal("packets should not empty")
	}
	if !reflect.DeepEqual(p1, packets[0]) {
		t.Fatalf("expect: %v, got: %v", p1, packets[0])
	}

	p2 := &Packet{Type: Type(5), Data: data, Length: len(data)}
	pp2, err := Encode(Kick, data)
	if err != nil {
		t.Error(err.Error())
	}
	d2 := NewDecoder()
	upp2, err := d2.Decode(pp2)
	if err != nil {
		t.Fatal(err.Error())
	}
	if len(upp2) < 1 {
		t.Fatal("packets should not empty")
	}
	if !reflect.DeepEqual(p2, upp2[0]) {
		t.Fatalf("expect: %v, got: %v", p2, upp2[0])
	}

	// Types outside the valid range must be rejected by Encode.
	// (Fix: the dead `_ = &Packet{...}` constructions were removed.)
	if _, err := Encode(Type(0), data); err == nil {
		t.Error("should err")
	}
	if _, err = Encode(Type(6), data); err == nil {
		t.Error("should err")
	}

	p5 := &Packet{Type: Type(5), Data: data, Length: len(data)}
	pp5, err := Encode(Kick, data)
	if err != nil {
		t.Fatal(err.Error())
	}
	d3 := NewDecoder()
	upp5, err := d3.Decode(append(pp5, []byte{0x01, 0x00, 0x00, 0x00}...))
	if err != nil {
		t.Fatal(err.Error())
	}
	if len(upp5) < 1 {
		t.Fatal("packets should not empty")
	}
	if !reflect.DeepEqual(p5, upp5[0]) {
		// Fix: the failure message previously printed p2 instead of p5.
		t.Fatalf("expect: %v, got: %v", p5, upp5[0])
	}
}
func BenchmarkDecoder_Decode(b *testing.B) { func BenchmarkDecoder_Decode(b *testing.B) {
data := []byte("hello world") data := []byte("hello world")
pp1, err := Encode(Handshake, data) pp1, err := Encode(Handshake, data)

@ -0,0 +1,40 @@
package env
import (
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"net/http"
"ngs/serialize"
"ngs/serialize/protobuf"
"time"
)
var (
	Wd                 string                   // working directory (absolute path), set by Listen at startup
	Die                chan bool                // closed to signal the application to end
	Heartbeat          time.Duration            // heartbeat interval
	CheckOrigin        func(*http.Request) bool // check origin when websocket enabled
	Debug              bool                     // enable Debug
	WSPath             string                   // WebSocket path(eg: ws://127.0.0.1/WSPath)
	HandshakeValidator func([]byte) error       // When you need to verify the custom data of the handshake request

	// TimerPrecision indicates the precision of timer, default is time.Second
	TimerPrecision = time.Second

	// GlobalTicker represents global ticker that all cron job will be executed
	// in globalTicker.
	GlobalTicker *time.Ticker

	// Serializer encodes/decodes payloads; defaults to protobuf (see init).
	Serializer serialize.Serializer

	// GrpcOptions are the default dial options for inter-node gRPC
	// connections (insecure transport credentials).
	GrpcOptions = []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
)
// init establishes the framework-wide defaults: a 30s heartbeat, debug
// disabled, a permissive websocket origin check, a no-op handshake
// validator, and protobuf serialization.
func init() {
	Die = make(chan bool)
	Heartbeat = 30 * time.Second
	Debug = false
	CheckOrigin = func(_ *http.Request) bool { return true }
	HandshakeValidator = func(_ []byte) error { return nil }
	Serializer = protobuf.NewSerializer()
}

@ -0,0 +1,222 @@
package message
import (
"encoding/binary"
"errors"
"fmt"
"ngs/internal/log"
"strings"
)
// Type represents the type of message, which could be Request/Notify/Response/Push
type Type byte

// Message types.
//
// Fix: Notify/Response/Push previously had an explicit value but no type,
// making them untyped int constants rather than Type; now all four share
// the Type type.
const (
	Request  Type = 0x00
	Notify   Type = 0x01
	Response Type = 0x02
	Push     Type = 0x03
)

// Flag-byte layout constants: bits 2-4 carry the message type (read after
// a >>1 shift), the last bit marks a dictionary-compressed route.
const (
	msgRouteCompressMask = 0x01 // 0000 0001, last bit: route is compressed
	msgTypeMask          = 0x07 // 0000 0111, message-type bits (after >>1)
	msgRouteLengthMask   = 0xFF // 1111 1111, full byte holds the route length
	msgHeadLength        = 0x02 // minimum message length in bytes (flag + 1)
)
// types maps each message type to a human-readable name for logging.
var types = map[Type]string{
	Request:  "Request",
	Notify:   "Notify",
	Response: "Response",
	Push:     "Push",
}

// String implements fmt.Stringer; unknown types yield the empty string.
func (t Type) String() string {
	return types[t]
}

// Route-compression dictionaries, populated by SetDictionary.
var (
	routes = make(map[string]uint16) // route map to code
	codes  = make(map[uint16]string) // code map to route
)

// Errors that could be occurred in message codec
var (
	ErrWrongMessageType  = errors.New("wrong message type")
	ErrInvalidMessage    = errors.New("invalid message")
	ErrRouteInfoNotFound = errors.New("route info not found in dictionary")
	ErrWrongMessage      = errors.New("wrong message")
)
// Message represents an unmarshaler message or a message which to be marshaled
type Message struct {
	Type  Type   // message type (flag)
	ID    uint64 // unique id, zero while notify mode
	Route string // route for locating service
	Data  []byte // payload
	// compressed records whether the route was found in the compression
	// dictionary when encoding/decoding.
	compressed bool
}

// New returns a new, zero-valued message instance.
func New() *Message {
	return &Message{}
}

// String, implementation of fmt.Stringer interface:
// "<type> <route> (<n>bytes)".
func (m *Message) String() string {
	return fmt.Sprintf("%s %s (%dbytes)", types[m.Type], m.Route, len(m.Data))
}

// Encode marshals message to binary format.
// It is a convenience wrapper around the package-level Encode.
func (m *Message) Encode() ([]byte, error) {
	return Encode(m)
}
// routable reports whether messages of type t carry a route field
// (every type except Response).
func routable(t Type) bool {
	switch t {
	case Request, Notify, Push:
		return true
	default:
		return false
	}
}
// invalidType reports whether t falls outside the known message-type
// range [Request, Push].
func invalidType(t Type) bool {
	return !(t >= Request && t <= Push)
}
// Encode marshals message to binary format. Different message types is corresponding to
// different message header, message types is identified by 2-4 bit of flag field. The
// relationship between message types and message header is presented as follows:
// ------------------------------------------
// |   type   |  flag  |       other        |
// |----------|--------|--------------------|
// | request  |----000-|<message id>|<route>|
// | notify   |----001-|<route>             |
// | response |----010-|<message id>        |
// | push     |----011-|<route>             |
// ------------------------------------------
// The figure above indicates that the bit does not affect the type of message.
func Encode(m *Message) ([]byte, error) {
	if invalidType(m.Type) {
		return nil, ErrWrongMessageType
	}

	buf := make([]byte, 0)
	// Message type occupies bits 2-4; the byte conversion must not be
	// removed (compiler hint).
	flag := byte(m.Type << 1)

	// A route found in the dictionary is emitted as a 2-byte code instead
	// of the literal string; mark that in the flag's last bit.
	code, compressed := routes[m.Route]
	if compressed {
		flag |= msgRouteCompressMask
	}
	buf = append(buf, flag)

	// Request/Response carry a message id, varint-encoded (7 bits per
	// byte, high bit set on all but the final byte).
	if m.Type == Request || m.Type == Response {
		n := m.ID
		// variant length encode
		for {
			b := byte(n % 128)
			n >>= 7
			if n != 0 {
				buf = append(buf, b+128)
			} else {
				buf = append(buf, b)
				break
			}
		}
	}

	// Routable types emit either the big-endian dictionary code or a
	// length-prefixed UTF-8 route string.
	if routable(m.Type) {
		if compressed {
			buf = append(buf, byte((code>>8)&0xFF))
			buf = append(buf, byte(code&0xFF))
		} else {
			buf = append(buf, byte(len(m.Route)))
			buf = append(buf, []byte(m.Route)...)
		}
	}

	// Payload is appended verbatim.
	buf = append(buf, m.Data...)
	return buf, nil
}
// Decode unmarshal the bytes slice to a message.
// It is the inverse of Encode; see the format table on Encode.
// It returns ErrInvalidMessage / ErrWrongMessage on truncated packets,
// ErrWrongMessageType on an unknown type flag, and ErrRouteInfoNotFound
// when a compressed route code is missing from the dictionary.
func Decode(data []byte) (*Message, error) {
	if len(data) < msgHeadLength {
		return nil, ErrInvalidMessage
	}
	m := New()
	flag := data[0]
	offset := 1
	m.Type = Type((flag >> 1) & msgTypeMask) // compiler hint: the Type conversion must not be removed

	if invalidType(m.Type) {
		return nil, ErrWrongMessageType
	}

	if m.Type == Request || m.Type == Response {
		id := uint64(0)
		// little end byte order
		// WARNING: must can be stored in 64 bits integer
		// variant length encode
		for i := offset; i < len(data); i++ {
			b := data[i]
			id += uint64(b&0x7F) << uint64(7*(i-offset))
			if b < 128 {
				offset = i + 1
				break
			}
		}
		m.ID = id
	}

	if offset >= len(data) {
		return nil, ErrWrongMessage
	}

	if routable(m.Type) {
		if flag&msgRouteCompressMask == 1 {
			m.compressed = true
			// guard against a truncated packet: the compressed route code
			// occupies two bytes (previously this could panic on short input)
			if offset+2 > len(data) {
				return nil, ErrWrongMessage
			}
			code := binary.BigEndian.Uint16(data[offset:(offset + 2)])
			route, ok := codes[code]
			if !ok {
				return nil, ErrRouteInfoNotFound
			}
			m.Route = route
			offset += 2
		} else {
			m.compressed = false
			rl := data[offset]
			offset++
			if offset+int(rl) > len(data) {
				return nil, ErrWrongMessage
			}
			m.Route = string(data[offset:(offset + int(rl))])
			offset += int(rl)
		}
	}

	if offset > len(data) {
		return nil, ErrWrongMessage
	}

	m.Data = data[offset:]
	return m, nil
}
// SetDictionary set routes map which be used to compress route.
// Route names are trimmed of surrounding whitespace; when a route or code
// already exists, a warning is logged and the last value wins.
// TODO(warning): set dictionary in runtime would be a dangerous operation!!!!!!
func SetDictionary(dict map[string]uint16) {
	for route, code := range dict {
		r := strings.TrimSpace(route)

		// duplication check
		if _, ok := routes[r]; ok {
			log.Println(fmt.Sprintf("duplicated route(route: %s, code: %d)", r, code))
		}
		if _, ok := codes[code]; ok {
			// fixed copy-paste: this branch detects a duplicated code, not a route
			log.Println(fmt.Sprintf("duplicated code(route: %s, code: %d)", r, code))
		}

		// update map, using last value when key duplicated
		routes[r] = code
		codes[code] = r
	}
}

@ -0,0 +1,164 @@
package message
import (
"reflect"
"testing"
)
// TestEncode verifies that every supported message shape survives an
// Encode/Decode round trip unchanged, both with and without route compression.
func TestEncode(t *testing.T) {
	SetDictionary(map[string]uint16{
		"test.test.test":  100,
		"test.test.test1": 101,
		"test.test.test2": 102,
		"test.test.test3": 103,
	})

	cases := []*Message{
		{Type: Request, ID: 100, Route: "test.test.test", Data: []byte(`hello world`), compressed: true},
		{Type: Request, ID: 100, Route: "test.test.test4", Data: []byte(`hello world`)},
		{Type: Response, ID: 100, Data: []byte(`hello world`)},
		{Type: Response, ID: 100, Data: []byte(`hello world`)},
		{Type: Notify, Route: "test.test.test", Data: []byte(`hello world`), compressed: true},
		{Type: Notify, Route: "test.test.test20", Data: []byte(`hello world`)},
		{Type: Push, Route: "test.test.test9", Data: []byte(`hello world`)},
		{Type: Push, Route: "test.test.test3", Data: []byte(`hello world`), compressed: true},
	}

	for _, msg := range cases {
		encoded, err := msg.Encode()
		if err != nil {
			t.Error(err.Error())
		}
		decoded, err := Decode(encoded)
		if err != nil {
			t.Error(err.Error())
		}
		if !reflect.DeepEqual(msg, decoded) {
			t.Error("not equal")
		}
	}
}

@ -0,0 +1,14 @@
package message
import "ngs/internal/env"
// Serialize converts v into its wire representation. A []byte value is
// passed through untouched; anything else is marshaled with the serializer
// configured in env.Serializer.
func Serialize(v interface{}) ([]byte, error) {
	if raw, ok := v.([]byte); ok {
		return raw, nil
	}
	return env.Serializer.Marshal(v)
}

@ -0,0 +1,5 @@
package runtime
import "ngs/cluster"
// CurrentNode holds a reference to the cluster node this process runs as.
var CurrentNode *cluster.Node

@ -0,0 +1,10 @@
package mock
// NetAddr is a stand-in implementation of the net.Addr interface,
// useful wherever a connection address is required in tests.
type NetAddr struct{}

// Network implements the net.Addr interface; it always reports "mock".
func (a NetAddr) Network() string {
	return "mock"
}

// String implements the net.Addr interface; it always reports "mock-addr".
func (a NetAddr) String() string {
	return "mock-addr"
}

@ -0,0 +1,153 @@
package ngs
import (
"google.golang.org/grpc"
"net/http"
"ngs/cluster"
"ngs/component"
"ngs/internal/env"
"ngs/internal/log"
"ngs/internal/message"
"ngs/pipeline"
"ngs/serialize"
"time"
)
// Option mutates the cluster options while the application is being configured.
type Option func(*cluster.Options)

// WithPipeline sets the message pipeline that processes inbound/outbound messages.
func WithPipeline(pipeline pipeline.Pipeline) Option {
	return func(opt *cluster.Options) {
		opt.Pipeline = pipeline
	}
}

// WithAdvertiseAddr sets the advertisement address option, it will be the listen address in
// master node and an advertisement address which cluster member to connect
func WithAdvertiseAddr(addr string, retryInterval ...time.Duration) Option {
	return func(opt *cluster.Options) {
		opt.AdvertiseAddr = addr
		if len(retryInterval) > 0 {
			// only the first supplied retry interval is honored
			opt.RetryInterval = retryInterval[0]
		}
	}
}

// WithClientAddr sets the listen address which is used to establish connection between
// cluster members. Will select an available port automatically if no member address
// setting and panic if no available port
func WithClientAddr(addr string) Option {
	return func(opt *cluster.Options) {
		opt.ClientAddr = addr
	}
}

// WithMaster sets the option to indicate whether the current node is master node
func WithMaster() Option {
	return func(opt *cluster.Options) {
		opt.IsMaster = true
	}
}
// WithGrpcOptions sets the grpc dial options.
// Note: the options are appended to package-level env state, not to the
// cluster options struct.
func WithGrpcOptions(opts ...grpc.DialOption) Option {
	return func(_ *cluster.Options) {
		env.GrpcOptions = append(env.GrpcOptions, opts...)
	}
}

// WithComponents sets the Components
func WithComponents(components *component.Components) Option {
	return func(opt *cluster.Options) {
		opt.Components = components
	}
}

// WithHeartbeatInterval sets Heartbeat time interval
func WithHeartbeatInterval(d time.Duration) Option {
	return func(_ *cluster.Options) {
		env.Heartbeat = d
	}
}

// WithCheckOriginFunc sets the function that check `Origin` in http headers
func WithCheckOriginFunc(fn func(*http.Request) bool) Option {
	return func(opt *cluster.Options) {
		env.CheckOrigin = fn
	}
}

// WithDebugMode let 'ngs' to run under Debug mode.
func WithDebugMode() Option {
	return func(_ *cluster.Options) {
		env.Debug = true
	}
}
// WithDictionary sets routes map used to compress route strings on the wire.
func WithDictionary(dict map[string]uint16) Option {
	return func(_ *cluster.Options) {
		message.SetDictionary(dict)
	}
}

// WithWSPath sets the HTTP path on which WebSocket connections are accepted.
func WithWSPath(path string) Option {
	return func(_ *cluster.Options) {
		env.WSPath = path
	}
}

// WithTimerPrecision sets the ticker precision, and time precision can not less
// than a Millisecond, and can not change after application running. The default
// precision is time.Second
func WithTimerPrecision(precision time.Duration) Option {
	if precision < time.Millisecond {
		// validated eagerly so misconfiguration fails at option construction
		panic("time precision can not less than a Millisecond")
	}
	return func(_ *cluster.Options) {
		env.TimerPrecision = precision
	}
}

// WithSerializer customizes application serializer, which automatically Marshal
// and UnMarshal handler payload
func WithSerializer(serializer serialize.Serializer) Option {
	return func(opt *cluster.Options) {
		env.Serializer = serializer
	}
}
// WithLabel sets the current node label in cluster
func WithLabel(label string) Option {
	return func(opt *cluster.Options) {
		opt.Label = label
	}
}

// WithIsWebsocket indicates whether current node WebSocket is enabled
func WithIsWebsocket(enableWs bool) Option {
	return func(opt *cluster.Options) {
		opt.IsWebsocket = enableWs
	}
}

// WithTSLConfig sets the `key` and `certificate` of TSL
// NOTE(review): "TSL" looks like a typo for "TLS"; the option and field
// names keep the existing spelling for compatibility.
func WithTSLConfig(certificate, key string) Option {
	return func(opt *cluster.Options) {
		opt.TSLCertificate = certificate
		opt.TSLKey = key
	}
}

// WithLogger overrides the default logger
func WithLogger(l log.Logger) Option {
	return func(opt *cluster.Options) {
		log.SetLogger(l)
	}
}

// WithHandshakeValidator sets the function that Verify `handshake` data
func WithHandshakeValidator(fn func([]byte) error) Option {
	return func(opt *cluster.Options) {
		env.HandshakeValidator = fn
	}
}

@ -0,0 +1,77 @@
package pipeline
import (
"ngs/internal/message"
"ngs/session"
"sync"
)
type (
	// Message is the alias of `message.Message`
	Message = message.Message

	// Func is a single pipeline stage: it inspects (and may mutate) msg for
	// the given session, returning a non-nil error to abort processing.
	Func func(s *session.Session, msg *message.Message) error

	// Pipeline groups the outbound and inbound processing channels.
	Pipeline interface {
		Outbound() Channel
		Inbound() Channel
	}

	pipeline struct {
		outbound, inbound *pipelineChannel
	}

	// Channel is an ordered list of Funcs applied to every message.
	Channel interface {
		PushFront(h Func)
		PushBack(h Func)
		Process(s *session.Session, msg *message.Message) error
	}

	pipelineChannel struct {
		mu       sync.RWMutex // guards handlers
		handlers []Func       // executed in order by Process
	}
)
// New constructs a Pipeline with empty inbound and outbound channels.
func New() Pipeline {
	p := &pipeline{
		outbound: &pipelineChannel{},
		inbound:  &pipelineChannel{},
	}
	return p
}

// Outbound returns the channel applied to outgoing messages.
func (p *pipeline) Outbound() Channel { return p.outbound }

// Inbound returns the channel applied to incoming messages.
func (p *pipeline) Inbound() Channel { return p.inbound }
// PushFront prepends handler h so it runs before every handler already registered.
func (p *pipelineChannel) PushFront(h Func) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.handlers = append([]Func{h}, p.handlers...)
}
// PushBack appends handler h so it runs after every handler already registered.
func (p *pipelineChannel) PushBack(h Func) {
	p.mu.Lock()
	p.handlers = append(p.handlers, h)
	p.mu.Unlock()
}
// Process runs msg through every registered handler in order, stopping at
// (and returning) the first non-nil error.
func (p *pipelineChannel) Process(s *session.Session, msg *message.Message) error {
	p.mu.RLock()
	defer p.mu.RUnlock()
	for _, handler := range p.handlers {
		if err := handler(s, msg); err != nil {
			return err
		}
	}
	return nil
}

@ -0,0 +1,79 @@
package scheduler
import (
"fmt"
"ngs/internal/env"
"ngs/internal/log"
"runtime/debug"
"sync/atomic"
"time"
)
const (
	messageQueueBacklog = 1 << 10 // 1024
	sessionCloseBacklog = 1 << 8  // 256
)

// LocalScheduler schedules task to a customized goroutine
type LocalScheduler interface {
	Schedule(Task)
}

// Task is a unit of work executed on the scheduler goroutine.
type Task func()

// Hook is a parameterless callback type.
type Hook func()

var (
	chDie   = make(chan struct{})   // closed by Close to stop the Schedule loop
	chExit  = make(chan struct{})   // closed when the Schedule loop has exited
	chTasks = make(chan Task, 1<<8) // queue of pending tasks
	started int32                   // set once when Schedule first runs
	closed  int32                   // set once when Close is first called
)
// try invokes task, converting any panic into a logged message so that a
// misbehaving task cannot bring down the scheduler goroutine.
func try(task func()) {
	defer func() {
		if r := recover(); r != nil {
			log.Println(fmt.Sprintf("Handle message panic: %+v\n%s", r, debug.Stack()))
		}
	}()
	task()
}
// Schedule runs the scheduler loop on the calling goroutine: it executes
// queued tasks, fires timer jobs on every env.TimerPrecision tick, and exits
// when Close is called. Only the first caller becomes the scheduler
// goroutine; subsequent calls return immediately.
func Schedule() {
	if atomic.AddInt32(&started, 1) != 1 {
		return
	}
	ticker := time.NewTicker(env.TimerPrecision)
	defer func() {
		ticker.Stop()
		// signal Close that the loop has fully drained
		close(chExit)
	}()
	for {
		select {
		case <-ticker.C:
			// run due timer/cron jobs
			cron()
		case f := <-chTasks:
			// execute a queued task, recovering from panics
			try(f)
		case <-chDie:
			return
		}
	}
}
// Close stops the scheduler exactly once: it signals the Schedule loop to
// exit and blocks until the loop has finished. Repeated calls are no-ops.
func Close() {
	if atomic.AddInt32(&closed, 1) != 1 {
		return
	}
	close(chDie)
	// wait for the Schedule loop to acknowledge shutdown
	<-chExit
	log.Println("Scheduler stopped")
}
// PushTask enqueues task for execution on the scheduler goroutine.
// It blocks when the task queue is full.
func PushTask(task Task) {
	chTasks <- task
}

@ -0,0 +1,196 @@
package scheduler
import (
"fmt"
"math"
"ngs/internal/log"
"runtime/debug"
"sync"
"sync/atomic"
"time"
)
const (
	// infinite marks a timer that never stops by itself.
	infinite = -1
)

var (
	timerManager = &struct {
		incrementID    int64            // auto increment id
		timers         map[int64]*Timer // all timers
		muClosingTimer sync.RWMutex     // guards closingTimer against concurrent close
		closingTimer   []int64          // ids of timers pending removal
		muCreatedTimer sync.RWMutex     // guards createdTimer against concurrent creation
		createdTimer   []*Timer         // timers created but not yet registered by cron
	}{}
)

type (
	// TimerFunc represents a function which will be called periodically in main
	// logic goroutine.
	TimerFunc func()

	// TimerCondition represents a checker that returns true when cron job needs
	// to execute
	TimerCondition interface {
		Check(now time.Time) bool
	}

	// Timer represents a cron job
	Timer struct {
		id        int64          // timer id
		fn        TimerFunc      // function that execute
		createAt  int64          // timer create time
		interval  time.Duration  // execution interval
		condition TimerCondition // condition to cron job execution
		elapse    int64          // total elapse time
		closed    int32          // is timer closed
		counter   int            // counter
	}
)

// init seeds the active-timer map so cron can register timers immediately.
func init() {
	timerManager.timers = map[int64]*Timer{}
}
// ID returns id of current timer
func (t *Timer) ID() int64 {
	return t.id
}

// Stop turns off a timer. After Stop, fn will not be called forever.
// Only the first call has an effect; later calls are no-ops.
func (t *Timer) Stop() {
	if atomic.AddInt32(&t.closed, 1) != 1 {
		return
	}
	// NOTE(review): counter is written here without atomics while cron()
	// reads it from the scheduler goroutine — possible data race; confirm
	// Stop is only ever called from the scheduler goroutine.
	t.counter = 0
}
// safeCall runs a timer callback and recovers from any panic it raises,
// logging the stack so one faulty timer cannot kill the cron loop.
func safeCall(_ int64, fn TimerFunc) {
	defer func() {
		if r := recover(); r != nil {
			log.Println(fmt.Sprintf("Handle timer panic: %+v\n%s", r, debug.Stack()))
		}
	}()
	fn()
}
// cron is invoked on every scheduler tick. It registers newly created
// timers, fires each due timer once, and removes timers whose counter
// has reached zero. It is intended to run only on the scheduler goroutine.
func cron() {
	// move freshly created timers into the active map
	// NOTE(review): the len() pre-check reads createdTimer without holding
	// muCreatedTimer — racy if NewCountTimer runs concurrently; confirm.
	if len(timerManager.createdTimer) > 0 {
		timerManager.muCreatedTimer.Lock()
		for _, t := range timerManager.createdTimer {
			timerManager.timers[t.id] = t
		}
		timerManager.createdTimer = timerManager.createdTimer[:0]
		timerManager.muCreatedTimer.Unlock()
	}
	if len(timerManager.timers) < 1 {
		return
	}
	now := time.Now()
	unn := now.UnixNano()
	for id, t := range timerManager.timers {
		if t.counter == infinite || t.counter > 0 {
			// condition timer
			if t.condition != nil {
				if t.condition.Check(now) {
					safeCall(id, t.fn)
				}
				continue
			}
			// execute job when its accumulated deadline has passed
			if t.createAt+t.elapse <= unn {
				safeCall(id, t.fn)
				t.elapse += int64(t.interval)
				// update timer counter
				if t.counter != infinite && t.counter > 0 {
					t.counter--
				}
			}
		}
		// expired (or stopped) timer: queue it for removal below
		if t.counter == 0 {
			timerManager.muClosingTimer.Lock()
			timerManager.closingTimer = append(timerManager.closingTimer, t.id)
			timerManager.muClosingTimer.Unlock()
			continue
		}
	}
	// purge everything queued for removal during this pass
	if len(timerManager.closingTimer) > 0 {
		timerManager.muClosingTimer.Lock()
		for _, id := range timerManager.closingTimer {
			delete(timerManager.timers, id)
		}
		timerManager.closingTimer = timerManager.closingTimer[:0]
		timerManager.muClosingTimer.Unlock()
	}
}
// NewTimer returns a new Timer containing a function that will be called
// with a period specified by the duration argument. It adjusts the intervals
// for slow receivers.
// The duration d must be greater than zero; if not, NewTimer will panic.
// Stop the timer to release associated resources.
func NewTimer(interval time.Duration, fn TimerFunc) *Timer {
	// an infinite count means the timer repeats until explicitly stopped
	return NewCountTimer(interval, infinite, fn)
}
// NewCountTimer returns a new Timer containing a function that will be called
// with a period specified by the duration argument. After count times, timer
// will be stopped automatically, It adjusts the intervals for slow receivers.
// The duration d must be greater than zero; if not, NewCountTimer will panic.
// Stop the timer to release associated resources.
func NewCountTimer(interval time.Duration, count int, fn TimerFunc) *Timer {
	if fn == nil {
		panic("ngs/timer: nil timer function")
	}
	if interval <= 0 {
		// message now carries the package prefix and no longer blames
		// NewTimer specifically, since several constructors funnel through here
		panic("ngs/timer: non-positive interval")
	}

	t := &Timer{
		id:       atomic.AddInt64(&timerManager.incrementID, 1),
		fn:       fn,
		createAt: time.Now().UnixNano(),
		interval: interval,
		elapse:   int64(interval), // first execution will be after interval
		counter:  count,
	}

	// stage the timer; cron() registers it on the next scheduler tick
	timerManager.muCreatedTimer.Lock()
	timerManager.createdTimer = append(timerManager.createdTimer, t)
	timerManager.muCreatedTimer.Unlock()
	return t
}
// NewAfterTimer returns a new Timer containing a function that will be called
// after duration that specified by the duration argument.
// The duration d must be greater than zero; if not, NewAfterTimer will panic.
// Stop the timer to release associated resources.
func NewAfterTimer(duration time.Duration, fn TimerFunc) *Timer {
	return NewCountTimer(duration, 1, fn)
}

// NewCondTimer returns a new Timer containing a function that will be called
// when condition satisfied that specified by the condition argument.
// The condition must be non-nil; if not, NewCondTimer will panic.
// Stop the timer to release associated resources.
func NewCondTimer(condition TimerCondition, fn TimerFunc) *Timer {
	if condition == nil {
		panic("ngs/timer: nil condition")
	}
	// the interval is effectively unused for condition timers; the maximum
	// duration ensures the interval-based path never fires
	t := NewCountTimer(time.Duration(math.MaxInt64), infinite, fn)
	t.condition = condition
	return t
}

@ -0,0 +1,84 @@
package scheduler
import (
"sync/atomic"
"testing"
"time"
)
// TestNewTimer registers many 1ms repeating timers, drives the cron loop
// manually, and checks the bookkeeping counters.
// NOTE(review): relies on a wall-clock 5ms sleep — may flake on a loaded CI box.
func TestNewTimer(t *testing.T) {
	var exists = struct {
		timers        int
		createdTimes  int
		closingTimers int
	}{
		timers:        len(timerManager.timers),
		createdTimes:  len(timerManager.createdTimer),
		closingTimers: len(timerManager.closingTimer),
	}
	const tc = 1000
	var counter int64
	for i := 0; i < tc; i++ {
		NewTimer(1*time.Millisecond, func() {
			atomic.AddInt64(&counter, 1)
		})
	}
	<-time.After(5 * time.Millisecond)
	// each cron pass fires every due timer once, hence tc*2 after two passes
	cron()
	cron()
	if counter != tc*2 {
		t.Fatalf("expect: %d, got: %d", tc*2, counter)
	}
	if len(timerManager.timers) != exists.timers+tc {
		t.Fatalf("timers: %d", len(timerManager.timers))
	}
	if len(timerManager.createdTimer) != exists.createdTimes {
		t.Fatalf("createdTimer: %d", len(timerManager.createdTimer))
	}
	if len(timerManager.closingTimer) != exists.closingTimers {
		t.Fatalf("closingTimer: %d", len(timerManager.closingTimer))
	}
}
// TestNewAfterTimer verifies that one-shot timers fire exactly once and are
// removed from the active set by the same cron pass.
// NOTE(review): relies on a wall-clock 5ms sleep — may flake on a loaded CI box.
func TestNewAfterTimer(t *testing.T) {
	var exists = struct {
		timers        int
		createdTimes  int
		closingTimers int
	}{
		timers:        len(timerManager.timers),
		createdTimes:  len(timerManager.createdTimer),
		closingTimers: len(timerManager.closingTimer),
	}
	const tc = 1000
	var counter int64
	for i := 0; i < tc; i++ {
		NewAfterTimer(1*time.Millisecond, func() {
			atomic.AddInt64(&counter, 1)
		})
	}
	<-time.After(5 * time.Millisecond)
	cron()
	if counter != tc {
		t.Fatalf("expect: %d, got: %d", tc, counter)
	}
	// one-shot timers are purged after firing, so the count returns to baseline
	if len(timerManager.timers) != exists.timers {
		t.Fatalf("timers: %d", len(timerManager.timers))
	}
	if len(timerManager.createdTimer) != exists.createdTimes {
		t.Fatalf("createdTimer: %d", len(timerManager.createdTimer))
	}
	if len(timerManager.closingTimer) != exists.closingTimers {
		t.Fatalf("closingTimer: %d", len(timerManager.closingTimer))
	}
}

@ -0,0 +1,25 @@
package json
import (
"encoding/json"
"ngs/serialize"
)
// Serializer implements the serialize.Serializer interface using the
// standard library encoding/json package.
type Serializer struct{}

// NewSerializer returns a new serialize.Serializer backed by JSON encoding.
func NewSerializer() serialize.Serializer {
	return &Serializer{}
}

// Marshal returns the JSON encoding of v.
func (s *Serializer) Marshal(v interface{}) ([]byte, error) {
	return json.Marshal(v)
}

// Unmarshal parses the JSON-encoded data and stores the result
// in the value pointed to by v.
func (s *Serializer) Unmarshal(data []byte, v interface{}) error {
	return json.Unmarshal(data, v)
}

@ -0,0 +1,62 @@
package json
import (
"reflect"
"testing"
)
// Message is a small fixture type used to exercise the JSON serializer.
type Message struct {
	Code int    `json:"code"`
	Data string `json:"data"`
}

// TestSerializer_Serialize checks that a value survives a Marshal/Unmarshal
// round trip unchanged.
func TestSerializer_Serialize(t *testing.T) {
	original := Message{1, "hello world"}
	serializer := NewSerializer()

	encoded, err := serializer.Marshal(original)
	if err != nil {
		t.Fail()
	}

	var decoded Message
	if err := serializer.Unmarshal(encoded, &decoded); err != nil {
		t.Fail()
	}
	if !reflect.DeepEqual(original, decoded) {
		t.Fail()
	}
}
// BenchmarkSerializer_Serialize measures JSON encoding of a small fixed message.
func BenchmarkSerializer_Serialize(b *testing.B) {
	m := &Message{100, "hell world"}
	s := NewSerializer()

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := s.Marshal(m); err != nil {
			// fixed message: this loop exercises Marshal, not Unmarshal
			b.Fatalf("marshal failed: %v", err)
		}
	}
}
// BenchmarkSerializer_Deserialize measures JSON decoding of a small fixed message.
func BenchmarkSerializer_Deserialize(b *testing.B) {
	m := &Message{100, "hell world"}
	s := NewSerializer()
	d, err := s.Marshal(m)
	if err != nil {
		// Fatal (not Error): continuing with invalid input would make the
		// timed loop meaningless
		b.Fatal(err)
	}

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m1 := &Message{}
		if err := s.Unmarshal(d, m1); err != nil {
			b.Fatalf("unmarshal failed: %v", err)
		}
	}
}

@ -0,0 +1,37 @@
package protobuf
import (
"errors"
"google.golang.org/protobuf/proto"
"ngs/serialize"
)
// ErrWrongValueType is the error used for marshal the value with protobuf encoding.
var ErrWrongValueType = errors.New("protobuf: convert on wrong type value")

// Serializer implements the serialize.Serializer interface using
// google.golang.org/protobuf encoding.
type Serializer struct{}

// NewSerializer returns a new Serializer.
func NewSerializer() serialize.Serializer {
	return &Serializer{}
}

// Marshal returns the protobuf encoding of v.
// v must implement proto.Message; otherwise ErrWrongValueType is returned.
func (s *Serializer) Marshal(v interface{}) ([]byte, error) {
	pb, ok := v.(proto.Message)
	if !ok {
		return nil, ErrWrongValueType
	}
	return proto.Marshal(pb)
}

// Unmarshal parses the protobuf-encoded data and stores the result
// in the value pointed to by v.
// v must implement proto.Message; otherwise ErrWrongValueType is returned.
func (s *Serializer) Unmarshal(data []byte, v interface{}) error {
	pb, ok := v.(proto.Message)
	if !ok {
		return ErrWrongValueType
	}
	return proto.Unmarshal(data, pb)
}

@ -0,0 +1,56 @@
package protobuf
import (
"google.golang.org/protobuf/proto"
"ngs/benchmark/testdata"
"testing"
)
// TestProtobufSerialezer_Serialize checks a protobuf Marshal/Unmarshal round trip.
// NOTE(review): "Serialezer" is a typo for "Serializer" in the test name; kept
// as-is since renaming would change which tests match -run filters.
func TestProtobufSerialezer_Serialize(t *testing.T) {
	m := &testdata.Ping{Content: "hello"}
	s := NewSerializer()
	b, err := s.Marshal(m)
	if err != nil {
		t.Error(err)
	}
	m1 := &testdata.Ping{}
	if err := s.Unmarshal(b, m1); err != nil {
		t.Fatalf("unmarshal failed: %v", err)
	}
	// refer: https://developers.google.com/protocol-buffers/docs/reference/go/faq#deepequal
	if !proto.Equal(m, m1) {
		t.Fail()
	}
}
// BenchmarkSerializer_Serialize measures protobuf encoding of a small message.
func BenchmarkSerializer_Serialize(b *testing.B) {
	m := &testdata.Ping{Content: "hello"}
	s := NewSerializer()

	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		if _, err := s.Marshal(m); err != nil {
			// fixed message: this loop exercises Marshal, not Unmarshal
			b.Fatalf("marshal failed: %v", err)
		}
	}
}
// BenchmarkSerializer_Deserialize measures protobuf decoding of a small message.
func BenchmarkSerializer_Deserialize(b *testing.B) {
	m := &testdata.Ping{Content: "hello"}
	s := NewSerializer()
	d, err := s.Marshal(m)
	if err != nil {
		// Fatal (not Error): continuing with invalid data would make the
		// timed loop meaningless
		b.Fatal(err)
	}

	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		m1 := &testdata.Ping{}
		if err := s.Unmarshal(d, m1); err != nil {
			b.Fatalf("unmarshal failed: %v", err)
		}
	}
}

@ -0,0 +1,19 @@
package serialize
type (
	// Marshaler is the interface implemented by types that
	// can marshal a value into a byte slice.
	Marshaler interface {
		Marshal(interface{}) ([]byte, error)
	}

	// Unmarshaler represents an Unmarshal interface
	Unmarshaler interface {
		Unmarshal([]byte, interface{}) error
	}

	// Serializer is the interface that groups the basic Marshal and Unmarshal methods.
	Serializer interface {
		Marshaler
		Unmarshaler
	}
)

@ -0,0 +1,41 @@
package service
import "sync/atomic"
// Connections is the package-wide connection bookkeeper used by session.
// All of its operations are safe for concurrent use.
var Connections = newConnectionService()

// connectionService tracks the number of live connections and hands out
// monotonically increasing session identifiers.
type connectionService struct {
	count int64 // current number of open connections
	sid   int64 // last session id handed out
}

// newConnectionService returns a fresh service with zeroed counters.
func newConnectionService() *connectionService {
	return &connectionService{}
}

// Increment the connection count
func (c *connectionService) Increment() {
	atomic.AddInt64(&c.count, 1)
}

// Decrement the connection count
func (c *connectionService) Decrement() {
	atomic.AddInt64(&c.count, -1)
}

// Count reports how many connections are currently tracked.
func (c *connectionService) Count() int64 {
	return atomic.LoadInt64(&c.count)
}

// Reset the connection service status
func (c *connectionService) Reset() {
	atomic.StoreInt64(&c.count, 0)
	atomic.StoreInt64(&c.sid, 0)
}

// SessionID hands out the next session id, starting from 1.
func (c *connectionService) SessionID() int64 {
	return atomic.AddInt64(&c.sid, 1)
}

@ -0,0 +1,29 @@
package service
import "testing"
// paraCount is the number of concurrent goroutines exercising the service.
const paraCount = 500000

// TestNewConnectionService hammers Increment/SessionID from many goroutines
// and verifies the final counters; it would fail under -race if the
// implementation were not atomic.
func TestNewConnectionService(t *testing.T) {
	service := newConnectionService()
	w := make(chan bool, paraCount)
	for i := 0; i < paraCount; i++ {
		go func() {
			service.Increment()
			service.SessionID()
			w <- true
		}()
	}
	// wait for every goroutine to finish
	for i := 0; i < paraCount; i++ {
		<-w
	}
	if service.Count() != paraCount {
		t.Error("wrong connection count")
	}
	// paraCount ids were consumed above, so the next one must be paraCount+1
	if service.SessionID() != paraCount+1 {
		t.Error("wrong session id")
	}
}

@ -0,0 +1,31 @@
package session
type (
	// LifetimeHandler represents a callback
	// that will be called when a session close or
	// session low-level connection broken.
	LifetimeHandler func(*Session)

	lifetime struct {
		// callbacks that emitted on session closed
		onClosed []LifetimeHandler
	}
)

// Lifetime is the package-wide registry of session-close callbacks.
var Lifetime = &lifetime{}

// OnClosed set the Callback which will be called
// when session is closed. Warning: session has closed.
func (lt *lifetime) OnClosed(h LifetimeHandler) {
	// NOTE(review): append is not synchronized; confirm OnClosed is only
	// called during single-threaded startup.
	lt.onClosed = append(lt.onClosed, h)
}
// Close runs every registered close callback against s, in registration order.
func (lt *lifetime) Close(s *Session) {
	for _, handler := range lt.onClosed {
		handler(s)
	}
}

@ -0,0 +1,26 @@
package session
import "sync"
// Router keeps track of which remote service is reachable at which address.
// It is safe for concurrent use.
type Router struct {
	routes sync.Map // service name -> address string
}

// newRouter returns an empty Router.
func newRouter() *Router {
	return &Router{}
}

// Bind records address as the location of the given remote service,
// replacing any previous binding.
func (r *Router) Bind(service, address string) {
	r.routes.Store(service, address)
}

// Find reports the address bound to service and whether a binding exists.
func (r *Router) Find(service string) (string, bool) {
	if v, ok := r.routes.Load(service); ok {
		return v.(string), true
	}
	return "", false
}

@ -0,0 +1,398 @@
package session
import (
"errors"
"net"
"ngs/service"
"sync"
"sync/atomic"
"time"
)
// NetworkEntity represent low-level network instance
type NetworkEntity interface {
	Push(route string, v interface{}) error
	RPC(route string, v interface{}) error
	LastMid() uint64
	Response(v interface{}) error
	ResponseMid(mid uint64, v interface{}) error
	Close() error
	RemoteAddr() net.Addr
}

var (
	// ErrIllegalUID represents an invalid uid
	ErrIllegalUID = errors.New("illegal uid")
)
// Session represents a client session which could storage temp data during low-level
// keep connected, all data will be released when the low-level connection was broken.
// Session instance related to the client will be passed to Handler method as the first
// parameter.
type Session struct {
	sync.RWMutex                        // protect data
	id       int64                      // session global unique id
	uid      int64                      // binding user id (read/written via sync/atomic)
	lastTime int64                      // last heartbeat time
	entity   NetworkEntity              // low-level network entity
	data     map[string]interface{}     // session data store
	router   *Router                    // remote service address table
}
// New returns a new session instance
// a NetworkEntity is a low-level network instance
func New(entity NetworkEntity) *Session {
	return &Session{
		id:       service.Connections.SessionID(), // globally unique, monotonically increasing
		entity:   entity,
		data:     make(map[string]interface{}),
		lastTime: time.Now().Unix(),
		router:   newRouter(),
	}
}
// NetworkEntity returns the low-level network agent object
func (s *Session) NetworkEntity() NetworkEntity {
	return s.entity
}

// Router returns the service router
func (s *Session) Router() *Router {
	return s.router
}

// RPC sends message to remote server; it delegates to the underlying entity.
func (s *Session) RPC(route string, v interface{}) error {
	return s.entity.RPC(route, v)
}

// Push message to client; it delegates to the underlying entity.
func (s *Session) Push(route string, v interface{}) error {
	return s.entity.Push(route, v)
}

// Response message to client; it delegates to the underlying entity.
func (s *Session) Response(v interface{}) error {
	return s.entity.Response(v)
}

// ResponseMID responses message to client, mid is
// request message ID
func (s *Session) ResponseMID(mid uint64, v interface{}) error {
	return s.entity.ResponseMid(mid, v)
}
// ID returns the session id
func (s *Session) ID() int64 {
	return s.id
}

// UID returns uid that bind to current session
func (s *Session) UID() int64 {
	return atomic.LoadInt64(&s.uid)
}

// LastMid returns the last message id
func (s *Session) LastMid() uint64 {
	return s.entity.LastMid()
}

// Bind UID to current session.
// It returns ErrIllegalUID when uid is not a positive number.
func (s *Session) Bind(uid int64) error {
	if uid < 1 {
		return ErrIllegalUID
	}
	atomic.StoreInt64(&s.uid, uid)
	return nil
}
// Close terminate current session, session related data will not be released,
// all related data should be Clear explicitly in Session closed callback
func (s *Session) Close() {
	// the entity's Close error is deliberately discarded: the connection is
	// being torn down either way
	_ = s.entity.Close()
}

// RemoteAddr returns the remote network address.
func (s *Session) RemoteAddr() net.Addr {
	return s.entity.RemoteAddr()
}
// Remove deletes the data associated with key from session storage.
func (s *Session) Remove(key string) {
	s.Lock()
	delete(s.data, key)
	s.Unlock()
}

// Set associates value with key in session storage, replacing any previous value.
func (s *Session) Set(key string, value interface{}) {
	s.Lock()
	s.data[key] = value
	s.Unlock()
}

// HasKey reports whether key has an associated value in session storage.
func (s *Session) HasKey(key string) bool {
	s.RLock()
	_, found := s.data[key]
	s.RUnlock()
	return found
}
// Int returns the value associated with the key as a int.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Int(key string) int {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(int); ok {
		return value
	}
	return 0
}

// Int8 returns the value associated with the key as a int8.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Int8(key string) int8 {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(int8); ok {
		return value
	}
	return 0
}

// Int16 returns the value associated with the key as a int16.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Int16(key string) int16 {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(int16); ok {
		return value
	}
	return 0
}

// Int32 returns the value associated with the key as a int32.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Int32(key string) int32 {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(int32); ok {
		return value
	}
	return 0
}

// Int64 returns the value associated with the key as a int64.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Int64(key string) int64 {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(int64); ok {
		return value
	}
	return 0
}

// Uint returns the value associated with the key as a uint.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Uint(key string) uint {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(uint); ok {
		return value
	}
	return 0
}

// Uint8 returns the value associated with the key as a uint8.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Uint8(key string) uint8 {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(uint8); ok {
		return value
	}
	return 0
}

// Uint16 returns the value associated with the key as a uint16.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Uint16(key string) uint16 {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(uint16); ok {
		return value
	}
	return 0
}

// Uint32 returns the value associated with the key as a uint32.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Uint32(key string) uint32 {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(uint32); ok {
		return value
	}
	return 0
}

// Uint64 returns the value associated with the key as a uint64.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Uint64(key string) uint64 {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(uint64); ok {
		return value
	}
	return 0
}

// Float32 returns the value associated with the key as a float32.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Float32(key string) float32 {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(float32); ok {
		return value
	}
	return 0
}

// Float64 returns the value associated with the key as a float64.
// The zero value is returned when the key is absent or holds another type.
func (s *Session) Float64(key string) float64 {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(float64); ok {
		return value
	}
	return 0
}

// String returns the value associated with the key as a string.
// The empty string is returned when the key is absent or holds another type.
func (s *Session) String(key string) string {
	s.RLock()
	defer s.RUnlock()
	if value, ok := s.data[key].(string); ok {
		return value
	}
	return ""
}
// Value returns the value associated with the key as a interface{}.
func (s *Session) Value(key string) interface{} {
	s.RLock()
	defer s.RUnlock()
	return s.data[key]
}

// State returns all session state.
// NOTE(review): the live map is returned, so callers hold it outside the
// session lock — confirm callers treat it as read-only.
func (s *Session) State() map[string]interface{} {
	s.RLock()
	defer s.RUnlock()
	return s.data
}
// Restore session state after reconnect.
// The provided map replaces the current store wholesale.
func (s *Session) Restore(data map[string]interface{}) {
	s.Lock()
	defer s.Unlock()
	s.data = data
}
// Clear releases all data related to current session
func (s *Session) Clear() {
	s.Lock()
	defer s.Unlock()
	// uid is read/written with sync/atomic elsewhere (UID/Bind); use an
	// atomic store here as well to avoid a data race with those accessors.
	atomic.StoreInt64(&s.uid, 0)
	s.data = map[string]interface{}{}
}

@ -0,0 +1,175 @@
package session
import "testing"
// TestNewSession verifies that fresh sessions receive positive ids.
func TestNewSession(t *testing.T) {
	s := New(nil)
	if s.ID() < 1 {
		t.Fail()
	}
}

// TestSession_Bind verifies that Bind stores each uid and reports no error
// for valid (positive) uids.
func TestSession_Bind(t *testing.T) {
	s := New(nil)
	uids := []int64{100, 1000, 10000000}
	for i, uid := range uids {
		// previously the error return was silently discarded
		if err := s.Bind(uid); err != nil {
			t.Fatalf("bind %d: %v", uid, err)
		}
		if s.UID() != uids[i] {
			t.Fail()
		}
	}
}
// TestSession_HasKey verifies HasKey reports true for a key after Set.
func TestSession_HasKey(t *testing.T) {
	s := New(nil)
	key, value := "hello", "world"
	s.Set(key, value)
	if !s.HasKey(key) {
		t.Fatalf("HasKey(%q) = false, want true", key)
	}
}
// TestSession_Float32 verifies a float32 stored via Set round-trips
// through the Float32 accessor.
func TestSession_Float32(t *testing.T) {
	s := New(nil)
	key := "hello"
	value := float32(1.2000)
	s.Set(key, value)
	if got := s.Float32(key); got != value {
		t.Fatalf("Float32(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_Float64 verifies a float64 stored via Set round-trips
// through the Float64 accessor.
func TestSession_Float64(t *testing.T) {
	s := New(nil)
	key := "hello"
	value := 1.2000
	s.Set(key, value)
	if got := s.Float64(key); got != value {
		t.Fatalf("Float64(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_Int verifies an int stored via Set round-trips through
// the Int accessor.
func TestSession_Int(t *testing.T) {
	s := New(nil)
	key := "testkey"
	value := 234
	s.Set(key, value)
	if got := s.Int(key); got != value {
		t.Fatalf("Int(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_Int8 verifies an int8 stored via Set round-trips through
// the Int8 accessor.
func TestSession_Int8(t *testing.T) {
	s := New(nil)
	key := "testkey"
	value := int8(123)
	s.Set(key, value)
	if got := s.Int8(key); got != value {
		t.Fatalf("Int8(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_Int16 verifies an int16 stored via Set round-trips
// through the Int16 accessor.
func TestSession_Int16(t *testing.T) {
	s := New(nil)
	key := "testkey"
	value := int16(3245)
	s.Set(key, value)
	if got := s.Int16(key); got != value {
		t.Fatalf("Int16(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_Int32 verifies an int32 stored via Set round-trips
// through the Int32 accessor.
func TestSession_Int32(t *testing.T) {
	s := New(nil)
	key := "testkey"
	value := int32(5454)
	s.Set(key, value)
	if got := s.Int32(key); got != value {
		t.Fatalf("Int32(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_Int64 verifies an int64 stored via Set round-trips
// through the Int64 accessor.
func TestSession_Int64(t *testing.T) {
	s := New(nil)
	key := "testkey"
	value := int64(444454)
	s.Set(key, value)
	if got := s.Int64(key); got != value {
		t.Fatalf("Int64(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_Uint verifies a uint stored via Set round-trips through
// the Uint accessor.
func TestSession_Uint(t *testing.T) {
	s := New(nil)
	key := "testkey"
	value := uint(24254)
	s.Set(key, value)
	if got := s.Uint(key); got != value {
		t.Fatalf("Uint(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_Uint8 verifies a uint8 stored via Set round-trips
// through the Uint8 accessor.
func TestSession_Uint8(t *testing.T) {
	s := New(nil)
	key := "testkey"
	value := uint8(34)
	s.Set(key, value)
	if got := s.Uint8(key); got != value {
		t.Fatalf("Uint8(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_Uint16 verifies a uint16 stored via Set round-trips
// through the Uint16 accessor.
func TestSession_Uint16(t *testing.T) {
	s := New(nil)
	key := "testkey"
	value := uint16(4645)
	s.Set(key, value)
	if got := s.Uint16(key); got != value {
		t.Fatalf("Uint16(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_Uint32 verifies a uint32 stored via Set round-trips
// through the Uint32 accessor.
func TestSession_Uint32(t *testing.T) {
	s := New(nil)
	key := "testkey"
	value := uint32(12365)
	s.Set(key, value)
	if got := s.Uint32(key); got != value {
		t.Fatalf("Uint32(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_Uint64 verifies a uint64 stored via Set round-trips
// through the Uint64 accessor.
func TestSession_Uint64(t *testing.T) {
	s := New(nil)
	key := "testkey"
	value := uint64(1000)
	s.Set(key, value)
	if got := s.Uint64(key); got != value {
		t.Fatalf("Uint64(%q) = %v, want %v", key, got, value)
	}
}
// TestSession_State verifies Set values are visible in the State map.
// The type assertion uses the comma-ok form so a missing key fails the
// test instead of panicking (the original bare assertion would panic).
func TestSession_State(t *testing.T) {
	s := New(nil)
	key := "testkey"
	value := uint64(1000)
	s.Set(key, value)
	state := s.State()
	got, ok := state[key].(uint64)
	if !ok || got != value {
		t.Fatalf("State()[%q] = %v, want %v", key, state[key], value)
	}
}
// TestSession_Restore verifies that one session's state can be restored
// into another session and read back through a typed accessor.
func TestSession_Restore(t *testing.T) {
	s := New(nil)
	s2 := New(nil)
	key := "testkey"
	value := uint64(1000)
	s.Set(key, value)
	s2.Restore(s.State())
	if got := s2.Uint64(key); got != value {
		t.Fatalf("Uint64(%q) after Restore = %v, want %v", key, got, value)
	}
}
Loading…
Cancel
Save