forked from jdamick/kafka
-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathconsumer.go
More file actions
259 lines (224 loc) · 7.58 KB
/
consumer.go
File metadata and controls
259 lines (224 loc) · 7.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
/*
* Copyright (c) 2011 NeuStar, Inc.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* NeuStar, the Neustar logo and related names and logos are registered
* trademarks, service marks or tradenames of NeuStar, Inc. All other
* product names, company names, marks, logos and symbols may be trademarks
* of their respective owners.
*/
package kafka
import (
"encoding/binary"
"errors"
"io"
"log"
"net"
"time"
"os"
)
const (
	// Number of seconds to sleep between broker reconnection attempts
	// when a connect fails (see ConsumeUntilQuit).
	CONNECTION_RETRY_WAIT_IN_SECONDS = 10
)
// BrokerConsumer consumes messages for a single topic/partition from one
// Kafka broker, tracking the offset to fetch from between requests.
type BrokerConsumer struct {
	broker  *Broker               // connection/encoding helper for one host+topic+partition
	offset  uint64                // next offset to fetch; advanced after each successful parse
	maxSize uint32                // max fetch size in bytes, sent with each consume request
	codecs  map[byte]PayloadCodec // codec id -> decoder used when decoding payloads
}
// NewBrokerConsumer creates a new broker consumer.
// hostname - host and optionally port, delimited by ':'
// topic to consume
// partition to consume from
// offset to start consuming from
// maxSize (in bytes) of the message to consume (this should be at least as
// big as the biggest message to be published)
func NewBrokerConsumer(hostname string, topic string, partition int, offset uint64, maxSize uint32) *BrokerConsumer {
	c := &BrokerConsumer{
		broker:  newBroker(hostname, topic, partition),
		offset:  offset,
		maxSize: maxSize,
		codecs:  DefaultCodecsMap,
	}
	return c
}
// NewBrokerOffsetConsumer is a simplified constructor that defaults the
// offset and maxSize to 0.
// hostname - host and optionally port, delimited by ':'
// topic to consume
// partition to consume from
func NewBrokerOffsetConsumer(hostname string, topic string, partition int) *BrokerConsumer {
	// Identical to NewBrokerConsumer with zeroed offset/maxSize.
	return NewBrokerConsumer(hostname, topic, partition, 0, 0)
}
// AddCodecs registers custom payload codecs for consumer decoding, so one
// can override (or extend) the default codecs for this consumer.
//
// payloadCodecs - an array of PayloadCodec implementations
func (consumer *BrokerConsumer) AddCodecs(payloadCodecs []PayloadCodec) {
	// BUG FIX: the constructors alias consumer.codecs to the shared
	// package-level DefaultCodecsMap. Writing into that map directly would
	// change decoding behavior for every consumer in the process. Merge
	// into a private copy instead (copy-on-write), so overrides stay
	// scoped to this consumer.
	merged := make(map[byte]PayloadCodec, len(consumer.codecs)+len(payloadCodecs))
	for id, codec := range consumer.codecs {
		merged[id] = codec
	}
	for id, codec := range codecsMap(payloadCodecs) {
		merged[id] = codec
	}
	consumer.codecs = merged
}
// ConsumeUntilQuit keeps consuming forward until a signal arrives on quit,
// logging errors but not dying on them.
//
// pollTimeoutMs - milliseconds to sleep between successful polls
// quit          - one received signal stops the consumer after the current
//                 iteration completes
// msgHandler    - invoked once per consumed message
//
// Returns the count of successful polls, the count of polls that failed with
// a non-EOF error, and always a nil error.
func (consumer *BrokerConsumer) ConsumeUntilQuit(pollTimeoutMs int64, quit chan os.Signal, msgHandler func(*Message)) (int64, int64, error) {
	messageCount := int64(0)
	skippedMessageCount := int64(0)

	// BUG FIX: the original used a plain bool flag written by one goroutine
	// and read by another with no synchronization — a data race. A channel
	// closed on quit, polled with a non-blocking select, gives the same
	// "stop after current iteration" semantics race-free.
	stop := make(chan struct{})
	done := make(chan bool, 1)

	go func() {
		<-quit
		close(stop)
	}()

	stopped := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	go func() {
		conn, lastConnectError := consumer.broker.connect()
		for !stopped() {
			if lastConnectError != nil {
				// Keep retrying the connection, pausing between attempts.
				conn, lastConnectError = consumer.broker.connect()
				if lastConnectError != nil {
					log.Printf("ERROR: [%s] Couldn't connect to Kafka server: %#v, sleeping %d seconds to retry...\n", consumer.broker.topic, lastConnectError, CONNECTION_RETRY_WAIT_IN_SECONDS)
					time.Sleep(time.Duration(CONNECTION_RETRY_WAIT_IN_SECONDS) * time.Second)
				}
			}
			if lastConnectError == nil {
				_, err := consumer.consumeWithConn(conn, msgHandler)
				if err != nil && err != io.EOF {
					log.Printf("ERROR: [%s] %#v\n", consumer.broker.topic, err)
					skippedMessageCount++
					// Reconnect when the error is a non-temporary network error.
					if netError, ok := err.(*net.OpError); ok && !netError.Temporary() {
						conn, lastConnectError = consumer.broker.connect()
					}
				} else {
					// NOTE: an io.EOF poll also lands here, so this counts
					// polls that did not hard-fail, not individual messages.
					messageCount++
				}
				time.Sleep(time.Duration(pollTimeoutMs) * time.Millisecond)
			}
		}
		done <- true
	}()
	<-done // wait until the last iteration finishes before returning
	return messageCount, skippedMessageCount, nil
}
// ConsumeOnChannel polls the broker in a background goroutine, delivering
// each consumed message on msgChan, until a value arrives on quit; it then
// closes the connection and msgChan and returns how many messages were sent.
//
// NOTE(review): after quit is received the connection is closed while the
// worker goroutine may still be blocked inside consumeWithConn; that read
// presumably fails with a non-EOF *net.OpError, which would hit the
// panic(err) branch below — confirm whether shutdown is expected to panic.
// The EOF path sends on quit ("force quit"), which assumes quit is buffered
// or still has a reader; verify against callers.
func (consumer *BrokerConsumer) ConsumeOnChannel(msgChan chan *Message, pollTimeoutMs int64, quit chan bool) (int, error) {
	conn, err := consumer.broker.connect()
	if err != nil {
		return -1, err
	}

	num := 0
	done := make(chan bool, 1)
	go func() {
		for {
			// Each poll forwards decoded messages to msgChan and bumps num.
			_, err := consumer.consumeWithConn(conn, func(msg *Message) {
				msgChan <- msg
				num += 1
			})
			if err != nil {
				if err != io.EOF {
					log.Println("Fatal Error: ", err)
					panic(err)
				}
				// io.EOF: treat as end of stream and shut ourselves down.
				quit <- true // force quit
				break
			}
			time.Sleep(time.Millisecond * time.Duration(pollTimeoutMs))
		}
		done <- true
	}()

	// wait to be told to stop..
	<-quit
	conn.Close()
	close(msgChan)
	<-done
	return num, err
}
type MessageHandlerFunc func(msg *Message)
// Consume performs a single fetch against the broker, handing every decoded
// message to handlerFunc, and closes the connection before returning. It
// returns the number of messages handled and any error encountered.
func (consumer *BrokerConsumer) Consume(handlerFunc MessageHandlerFunc) (int, error) {
	conn, connErr := consumer.broker.connect()
	if connErr != nil {
		return -1, connErr
	}
	defer conn.Close()

	num, consumeErr := consumer.consumeWithConn(conn, handlerFunc)
	if consumeErr != nil {
		log.Println("Fatal Error: ", consumeErr)
	}
	return num, consumeErr
}
// consumeWithConn writes one consume request for the consumer's current
// offset/maxSize over conn, reads the response, decodes the returned
// payload, invokes handlerFunc once per decoded message, and advances
// consumer.offset past the bytes that were successfully parsed.
//
// Returns the number of messages handled; -1 with an error when the write
// or the response read fails.
func (consumer *BrokerConsumer) consumeWithConn(conn *net.TCPConn, handlerFunc MessageHandlerFunc) (int, error) {
	_, err := conn.Write(consumer.broker.EncodeConsumeRequest(consumer.offset, consumer.maxSize))
	if err != nil {
		return -1, err
	}

	length, payload, err := consumer.broker.readResponse(conn)
	if err != nil {
		return -1, err
	}

	num := 0
	if length > 2 {
		// parse out the messages
		var currentOffset uint64 = 0
		for currentOffset < uint64(len(payload)) && currentOffset <= uint64(length-4) {
			totalLength, msgs := Decode(payload[currentOffset:], consumer.codecs)
			if msgs == nil {
				// update the broker's offset for next consumption incase they want to skip this message and keep going
				consumer.offset += currentOffset
				return num, errors.New("Error Decoding Message")
			}

			msgOffset := consumer.offset + currentOffset
			for _, msg := range msgs {
				// update all of the messages offset
				// multiple messages can be at the same offset (compressed for example)
				msg.offset = msgOffset
				handlerFunc(&msg)
				num += 1
			}

			// Advance past this entry: 4 bytes of header plus the
			// totalLength bytes Decode reported consuming.
			currentOffset += uint64(4 + totalLength)
		}
		// update the broker's offset for next consumption
		consumer.offset += currentOffset
	}

	return num, err
}
// GetOffsets returns a list of valid offsets (up to maxNumOffsets) before
// the given time, where time is in milliseconds (-1, from the latest offset
// available, -2 from the smallest offset available).
// The result is a list of offsets, in descending order.
func (consumer *BrokerConsumer) GetOffsets(time int64, maxNumOffsets uint32) ([]uint64, error) {
	offsets := make([]uint64, 0)

	conn, err := consumer.broker.connect()
	if err != nil {
		return offsets, err
	}
	defer conn.Close()

	if _, err = conn.Write(consumer.broker.EncodeOffsetRequest(time, maxNumOffsets)); err != nil {
		return offsets, err
	}

	length, payload, err := consumer.broker.readResponse(conn)
	if err != nil {
		return offsets, err
	}

	if length > 4 {
		// First 4 bytes hold the count of offsets that follow; each offset
		// is a big-endian uint64.
		count := binary.BigEndian.Uint32(payload[0:])
		for pos := uint64(4); pos < uint64(length-4) && uint32(len(offsets)) < count; pos += 8 {
			offsets = append(offsets, binary.BigEndian.Uint64(payload[pos:]))
		}
	}

	return offsets, err
}