-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathserver.go
More file actions
218 lines (197 loc) · 6.94 KB
/
server.go
File metadata and controls
218 lines (197 loc) · 6.94 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
package pegasus
import (
"fmt"
"sync"
"sync/atomic"
"time"
"6.824/labgob"
"6.824/labrpc"
"6.824/raft"
)
// AddRaftOp is the RPC handler for client Get/Put/Append requests.
// It deduplicates client retries by (ClientId, RequestId), submits the
// operation to Raft via Start, and then blocks on consumerCond until
// listenOnApplyCh has applied the log entry at the index Raft assigned
// to this command. On success the applied value is copied into reply.
func (kv *KVServer) AddRaftOp(args *OpArgs, reply *OpReply) {
	// Choose a debug-log topic based on the operation kind.
	var topic Topic
	if args.Op == GetVal {
		topic = KV_GET
	} else {
		topic = KV_PUTAPPEND
	}
	requestId := args.RequestId
	clientId := args.ClientId
	// Duplicate detection: a client retries with the same RequestId, so a
	// matching entry in kv.requests means we have already seen this request.
	// NOTE(review): kv.mu is released before existingReq is read below; this
	// relies on the Request pointer staying valid after unlock — confirm no
	// concurrent writer mutates/frees it in a way that races with this read.
	kv.mu.Lock()
	existingReq, ok := kv.requests[clientId]
	kv.mu.Unlock()
	if ok && existingReq.Id == requestId {
		// duplicate request!
		result := existingReq.Result
		if !result.isFinished {
			// the old thread hasn't gotten the result yet - we can tell this to the client who can resubmit this req after a timeout.
			reply.Err = ErrNotFinishedYet
			return
		}
		kv.logMsg(topic, fmt.Sprintf("Duplicate request found for requestId %v! Returning existing result %v", requestId, result.Value))
		reply.Value = result.Value
		return
	}
	command := PegasusCommand{
		RequestId: requestId,
		Op:        args.Op,
		Key:       args.Key,
		Value:     args.Value,
	}
	// consumerCond.L is kv.mu (see StartKVServer); from here to the final
	// Unlock we hold the same mutex that listenOnApplyCh uses, so Start and
	// the bookkeeping below are atomic with respect to the apply loop.
	kv.consumerCond.L.Lock()
	index, _, isLeader := kv.rf.Start(command)
	if !isLeader {
		reply.Err = ErrWrongLeader
		kv.consumerCond.L.Unlock()
		return
	}
	if requestId == FAKE_REQUEST_ID {
		// process no further, this req exists only as an HB
		kv.consumerCond.L.Unlock()
		return
	}
	kv.logMsg(topic, fmt.Sprintf("Sent command with id %v, will wait for index %v!", requestId, index))
	// Record the in-flight request so the apply loop knows someone is
	// waiting for this log index (see the someonesWaiting scan there).
	kv.requests[clientId] = &Request{Id: requestId, Index: index}
	// Wait until the apply loop has produced exactly our index. Wait
	// atomically releases kv.mu while sleeping and reacquires it on wakeup.
	for kv.lastAppliedIndex != index {
		kv.logMsg(topic, fmt.Sprintf("lastAppliedIndex %v != expectedIndex %v, so will sleep", kv.lastAppliedIndex, index))
		kv.consumerCond.Wait()
	}
	kv.logMsg(topic, fmt.Sprintf("Found my expected index %v! Signaling producer...", index))
	// Hand the slot back to the producer (apply loop) so it can apply the
	// next committed entry.
	kv.consumed = true
	kv.producerCond.Signal()
	// Same index but a different request id means our entry was overwritten
	// in the Raft log (leadership changed); the client must retry.
	if kv.lastAppliedId != requestId {
		kv.logMsg(topic, fmt.Sprintf("Found a different log message id (%v) than expected (%v)", kv.lastAppliedId, requestId))
		reply.Err = ErrLogOverwritten
		delete(kv.requests, clientId)
		kv.consumerCond.L.Unlock()
		return
	}
	reply.Value = kv.lastAppliedKeyValue.Value
	// Cache the result so a retry of this same request returns it directly.
	currentRequest := kv.requests[clientId]
	currentRequest.Result = Result{isFinished: true, Value: reply.Value}
	kv.logMsg(topic, fmt.Sprintf("Stored value %v, and returning it for key %v successfully!", reply.Value, kv.lastAppliedKeyValue.Key))
	kv.consumerCond.L.Unlock()
}
//
// the tester calls Kill() when a KVServer instance won't
// be needed again. for your convenience, we supply
// code to set rf.dead (without needing a lock),
// and a killed() method to test rf.dead in
// long-running loops. you can also add your own
// code to Kill(). you're not required to do anything
// about this, but it may be convenient (for example)
// to suppress debug output from a Kill()ed instance.
//

// Kill marks this server as dead (checked via killed()) and shuts down
// its underlying Raft instance. The flag is set atomically so callers
// need no lock.
func (kv *KVServer) Kill() {
	atomic.StoreInt32(&kv.dead, 1)
	kv.rf.Kill()
	// Your code here, if desired.
}
// killed reports whether Kill() has been called on this server.
// It is safe to call from any goroutine without holding kv.mu.
func (kv *KVServer) killed() bool {
	return atomic.LoadInt32(&kv.dead) == 1
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Raft to
// form the fault-tolerant key/value service.
// me is the index of the current server in servers[].
// the k/v server should store snapshots through the underlying Raft
// implementation, which should call persister.SaveStateAndSnapshot() to
// atomically save the Raft state along with the snapshot.
// the k/v server should snapshot when Raft's saved state exceeds maxraftstate bytes,
// in order to allow Raft to garbage-collect its log. if maxraftstate is -1,
// you don't need to snapshot.
// StartKVServer() must return quickly, so it should start goroutines
// for any long-running work.
//
func StartKVServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister, maxraftstate int) *KVServer {
	// Register the command type so labgob can encode/decode it through Raft.
	labgob.Register(PegasusCommand{})
	kv := new(KVServer)
	kv.me = me
	kv.maxraftstate = maxraftstate
	kv.stateMachine = make(map[string]string)
	// Initialize the condition variables in place with a composite literal.
	// The original `*sync.NewCond(&kv.mu)` copied a sync.Cond by value,
	// which `go vet` (copylocks) flags; this form is equivalent and vet-clean.
	// Both conds share kv.mu, so AddRaftOp and listenOnApplyCh coordinate
	// under a single mutex.
	kv.consumerCond = sync.Cond{L: &kv.mu}
	kv.producerCond = sync.Cond{L: &kv.mu}
	kv.applyCh = make(chan raft.ApplyMsg)
	// Start with consumed=true: there is no pending applied value yet.
	kv.consumed = true
	kv.requests = make(map[int64]*Request)
	kv.duplicate = make(map[int64]bool)
	kv.logMsg(KV_SETUP, fmt.Sprintf("Initialized pegasus server S%v!", me))
	// Start the apply-channel listener before Raft so no ApplyMsg is missed.
	go kv.listenOnApplyCh()
	kv.rf = raft.Make(servers, me, persister, kv.applyCh)
	go kv.sendPeriodicGet()
	return kv
}
// listenOnApplyCh is the single "producer" goroutine: it drains committed
// entries from Raft's apply channel, applies them to the in-memory state
// machine, and hands each applied result to any waiting AddRaftOp
// ("consumer") via consumerCond, then waits on producerCond until that
// result is consumed before applying the next entry.
func (kv *KVServer) listenOnApplyCh() {
	for applyMsg := range kv.applyCh {
		// NOTE(review): this assertion assumes every ApplyMsg carries a
		// PegasusCommand; if the raft layer ever delivers snapshot/non-command
		// messages (CommandValid == false), this would panic — confirm.
		pegasusCommand := applyMsg.Command.(PegasusCommand)
		key := pegasusCommand.Key
		value := pegasusCommand.Value
		operation := pegasusCommand.Op
		kv.logMsg(KV_APPLYCH, fmt.Sprintf("Got new message on applyMsg %v (index %v)", applyMsg.Command, applyMsg.CommandIndex))
		kv.mu.Lock()
		// Skip re-applying writes we have already executed (duplicate
		// RequestIds can be committed more than once across leader changes).
		_, duplicate := kv.duplicate[pegasusCommand.RequestId]
		if !duplicate && operation == PutVal {
			kv.stateMachine[key] = value
			kv.logMsg(KV_APPLYCH, fmt.Sprintf("Finished operation PutVal. Updated value for %v to %v", key, kv.stateMachine[key]))
		} else if !duplicate && operation == AppendVal {
			prevValue := kv.stateMachine[key]
			kv.stateMachine[key] = prevValue + value
			kv.logMsg(KV_APPLYCH, fmt.Sprintf("Finished operation AppendVal for key %v. New value is %v", key, kv.stateMachine[key]))
		} else {
			// Get (or duplicate write): report the current stored value.
			value = kv.stateMachine[key]
		}
		kv.duplicate[pegasusCommand.RequestId] = true
		// Publish the applied entry for the waiting consumer to pick up.
		kv.lastAppliedId = pegasusCommand.RequestId
		kv.lastAppliedIndex = applyMsg.CommandIndex
		kv.lastAppliedKeyValue.Key = key
		kv.lastAppliedKeyValue.Value = value
		kv.consumed = false
		kv.logMsg(KV_APPLYCH, fmt.Sprintf("Sending consumer broadcast!"))
		kv.consumerCond.Broadcast()
		// wait for the value we just produced to be consumed, just before producing another value
		// However, before waiting, we need to check if someone is even expecting this message.
		// go through the requests map, and see if any values match this CommandIndex.
		someonesWaiting := false
		for _, v := range kv.requests {
			if v.Index == applyMsg.CommandIndex {
				someonesWaiting = true
				break
			}
		}
		if someonesWaiting {
			// wait for the current value to be consumed by them.
			for !kv.consumed {
				kv.logMsg(KV_APPLYCH, fmt.Sprintf("Going into wait... context: index=%v", applyMsg.CommandIndex))
				kv.producerCond.Wait()
				kv.logMsg(KV_APPLYCH, fmt.Sprintf("Awoken! context: index=%v", applyMsg.CommandIndex))
			}
		} else {
			kv.logMsg(KV_APPLYCH, fmt.Sprintf("Skipped waiting because no one is waiting for index %v", applyMsg.CommandIndex))
			kv.consumed = true // treat the current value to be consumed because no one is going to (and some previous leader already did)
		}
		kv.mu.Unlock()
	}
}
// sendPeriodicGet periodically submits a fake Get through AddRaftOp so the
// leader keeps committing entries (acting as a heartbeat; AddRaftOp returns
// early for FAKE_REQUEST_ID and never blocks on a result).
//
// Fix: the original looped forever, leaking this goroutine after Kill();
// the loop now stops once the server is killed.
func (kv *KVServer) sendPeriodicGet() {
	for !kv.killed() {
		time.Sleep(time.Millisecond * PERIODIC_GET_WAIT)
		opArgs := OpArgs{
			Key:       "fake key",
			Op:        GetVal,
			RequestId: FAKE_REQUEST_ID,
			ClientId:  FAKE_CLIENT_ID,
		}
		kv.AddRaftOp(&opArgs, &OpReply{})
	}
}
// IsLeader is an RPC handler that reports whether this server's underlying
// Raft instance currently believes it is the leader.
func (kv *KVServer) IsLeader(args *FindLeaderArgs, reply *FindLeaderReply) {
	_, isLeader := kv.rf.GetState()
	reply.IsLeader = isLeader
}
// getId returns a fresh, monotonically increasing identifier.
// The counter is guarded by kv.mu, so concurrent callers never
// receive the same value.
func (kv *KVServer) getId() int {
	kv.mu.Lock()
	defer kv.mu.Unlock()
	kv.counter++
	return kv.counter
}