-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathserver.go
163 lines (142 loc) · 3.81 KB
/
server.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
package graft
import (
"fmt"
"log"
"net"
"net/rpc"
"sync"
)
// Server wraps a raft.ConsensusModule along with a rpc.Server that exposes its
// methods as RPC endpoints. It also manages the peers of the Raft server. The
// main goal of this type is to simplify the code of raft.Server for presentation
// purposes. raft.ConsensusModule has a *Server to do its peer communication and
// doesn't have to worry about the specifics of running an RPC server.
type Server struct {
	mu sync.Mutex // guards the fields below against concurrent access

	serverId int   // this server's ID within the cluster
	peerIds  []int // IDs of all other servers in the cluster

	cm       *ConsensusModule
	storage  Storage
	rpcProxy *RPCProxy

	commitChan  chan<- CommitEntry  // forwarded to the ConsensusModule in Serve
	peerClients map[int]*rpc.Client // open RPC clients to peers, keyed by peer ID; nil entry = disconnected
	rpcServer   *rpc.Server
	listener    net.Listener

	// ready is forwarded to the ConsensusModule in Serve.
	// NOTE(review): presumably signals when the CM may start its election
	// timer — confirm in ConsensusModule.
	ready <-chan interface{}
	quit  chan interface{} // closed in Shutdown to stop the accept loop
	wg    sync.WaitGroup   // tracks the accept loop and per-connection goroutines
}
// NewServer creates a new Server with the given ID, peer list and storage.
// The returned server does not listen yet; call Serve to start it.
func NewServer(serverId int, peerIds []int, storage Storage, ready <-chan interface{}, commitChan chan<- CommitEntry) *Server {
	return &Server{
		serverId:    serverId,
		peerIds:     peerIds,
		storage:     storage,
		ready:       ready,
		commitChan:  commitChan,
		peerClients: make(map[int]*rpc.Client),
		quit:        make(chan interface{}),
	}
}
// Serve starts the server: it creates the ConsensusModule, registers it (via
// RPCProxy) on a new RPC server listening on an OS-assigned TCP port, and
// launches a goroutine that accepts and serves connections until Shutdown.
func (s *Server) Serve() {
	s.mu.Lock()
	s.cm = NewConsensusModule(s.serverId, s.peerIds, s, s.storage, s.ready, s.commitChan)

	// Create a new RPC server and register a RPCProxy that forwards all methods
	// to n.cm
	s.rpcServer = rpc.NewServer()
	s.rpcProxy = &RPCProxy{cm: s.cm}
	s.rpcServer.RegisterName("ConsensusModule", s.rpcProxy)

	var err error
	s.listener, err = net.Listen("tcp", ":0")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("[%v] listening at %s", s.serverId, s.listener.Addr())
	s.mu.Unlock()

	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		for {
			conn, err := s.listener.Accept()
			if err != nil {
				select {
				case <-s.quit:
					// Shutdown closed the listener; exit quietly.
					return
				default:
					log.Fatal("accept error:", err)
				}
			}
			// BUG FIX: serving the connection must happen on the success path.
			// Previously this block was nested inside the err != nil branch,
			// so accepted connections were never served.
			s.wg.Add(1)
			go func() {
				s.rpcServer.ServeConn(conn)
				s.wg.Done()
			}()
		}
	}()
}
// DisconnectAll closes all the client connections to peer for this server.
func (s *Server) DisconnectAll() {
	s.mu.Lock()
	defer s.mu.Unlock()
	for id, client := range s.peerClients {
		if client != nil {
			client.Close()
			s.peerClients[id] = nil
		}
	}
}
// Shutdown closes the server and waits for it to shut down properly.
// The order matters: stop the consensus module first, then close quit so
// the accept loop interprets the listener error as a clean exit, then close
// the listener to unblock Accept, and finally wait for all goroutines.
func (s *Server) Shutdown() {
	s.cm.Stop()
	close(s.quit)
	s.listener.Close()
	s.wg.Wait()
}
// GetListenAddr reports the address this server's listener is bound to.
func (s *Server) GetListenAddr() net.Addr {
	s.mu.Lock()
	addr := s.listener.Addr()
	s.mu.Unlock()
	return addr
}
// ConnectToPeer dials the peer identified by peerId at addr and records the
// RPC client. A no-op if a connection to that peer already exists.
func (s *Server) ConnectToPeer(peerId int, addr net.Addr) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.peerClients[peerId] != nil {
		return nil // already connected
	}
	client, err := rpc.Dial(addr.Network(), addr.String())
	if err != nil {
		return err
	}
	s.peerClients[peerId] = client
	return nil
}
// RPCProxy is a trivial pass-thru proxy type for ConsensusModule's RPC methods.
// It's useful for:
// - Simulating a small delay in RPC transmission
// - Avoiding running into https://github.com/golang/go/issues/19957
// - Simulating possible unreliable connections by delaying some messages
// significantly and dropping others when RAFT_UNRELIABLE_RPC is set.
type RPCProxy struct {
	cm *ConsensusModule // the module whose RPC methods are forwarded
}
// DisconnectToPeer disconnects this server from the peer identified by peerId.
func (s *Server) DisconnectToPeer(peerId int) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	client := s.peerClients[peerId]
	if client == nil {
		return nil
	}
	s.peerClients[peerId] = nil
	return client.Close()
}
// Call invokes serviceMethod on the peer identified by id, with the given
// args, storing the result in reply. Returns an error if the peer's client
// has been closed (e.g. after shutdown) or if the RPC itself fails.
func (s *Server) Call(id int, serviceMethod string, args interface{}, reply interface{}) error {
	s.mu.Lock()
	peer := s.peerClients[id]
	s.mu.Unlock()

	// If this is called after shutdown (where client.Close is called),
	// it will return an error.
	if peer == nil {
		return fmt.Errorf("call client %d after it's closed", id)
	}
	return peer.Call(serviceMethod, args, reply)
}