-
Notifications
You must be signed in to change notification settings - Fork 0
/
producer.go
112 lines (96 loc) · 2.95 KB
/
producer.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
// Copyright 2017-2019 Skroutz S.A.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"log"
"os"
"strings"
"sync"
"sync/atomic"
rdkafka "github.com/confluentinc/confluent-kafka-go/kafka"
)
// Producer wraps an rdkafka.Producer, consuming its delivery reports
// in a dedicated goroutine and recording failures in package stats.
type Producer struct {
	// id is the librdkafka handle name minus its "rdkafka#" prefix
	// (see NewProducer); used as the log prefix and String() value.
	id     string
	rdProd *rdkafka.Producer
	log    *log.Logger
	// started reports whether consumeDeliveries() has been spawned.
	// NOTE(review): read and written without synchronization in
	// Produce(); assumes Produce is never called concurrently — confirm.
	started bool
	// wg lets Close() wait for consumeDeliveries() to exit.
	wg sync.WaitGroup
}
// NewProducer creates a Producer backed by a fresh rdkafka.Producer
// configured with cfg. The returned Producer logs to stderr with a
// prefix derived from the underlying librdkafka handle name. It
// returns an error if the underlying producer cannot be created.
func NewProducer(cfg rdkafka.ConfigMap) (*Producer, error) {
	rd, err := rdkafka.NewProducer(&cfg)
	if err != nil {
		return nil, err
	}

	// librdkafka names handles "rdkafka#<kind>-<n>"; keep only the suffix.
	name := strings.TrimPrefix(rd.String(), "rdkafka#")

	p := &Producer{
		id:      name,
		rdProd:  rd,
		started: false,
		log:     log.New(os.Stderr, fmt.Sprintf("[%s] ", name), log.Ldate|log.Ltime),
	}
	return p, nil
}
// String implements fmt.Stringer, returning the producer's identifier.
func (p *Producer) String() string {
	return p.id
}
// Produce enqueues msg for asynchronous delivery. The first call also
// spawns the goroutine that drains the delivery-report channel.
//
// NOTE(review): the started check-then-set is unsynchronized; this
// assumes Produce is never invoked concurrently — confirm with callers.
func (p *Producer) Produce(msg *rdkafka.Message) error {
	if !p.started {
		p.started = true
		p.wg.Add(1)
		go p.consumeDeliveries()
		p.log.Printf("Started working...")
	}
	return p.rdProd.Produce(msg, nil)
}
// Flush blocks until every buffered message has been delivered or
// timeoutMs milliseconds have elapsed, whichever comes first, and
// returns the number of messages still outstanding.
func (p *Producer) Flush(timeoutMs int) int {
	return p.rdProd.Flush(timeoutMs)
}
// Close flushes any buffered messages (waiting up to 5 seconds), shuts
// down the underlying producer and blocks until the delivery-report
// goroutine has drained its channel and exited.
func (p *Producer) Close() {
	if remaining := p.Flush(5000); remaining > 0 {
		p.log.Printf("Error flushing: %d unflushed events", remaining)
		atomic.AddUint64(&stats.producerUnflushed, uint64(remaining))
	}

	// Closing the underlying producer closes its Events() channel,
	// which in turn makes consumeDeliveries() return.
	p.rdProd.Close()
	p.wg.Wait()
	p.log.Print("Bye")
}
// consumeDeliveries drains the producer's Events() channel, counting
// delivery failures and unexpected event types in package stats. It
// returns when the channel is closed (see Close) and signals
// completion through p.wg.
func (p *Producer) consumeDeliveries() {
	defer p.wg.Done()

	for ev := range p.rdProd.Events() {
		switch msg := ev.(type) {
		case *rdkafka.Message:
			if err := msg.TopicPartition.Error; err != nil {
				p.log.Printf("Error delivering `%s` to %s: %s", msg.Value, msg, err)
				atomic.AddUint64(&stats.producerErr, 1)
			}
		default:
			p.log.Printf("Error consuming delivery event: Unknown event type (%s)", ev)
			// 'Connection reset by peer' errors are expected noise and
			// not counted as unknown events.
			// See https://github.com/edenhill/librdkafka/wiki/FAQ#why-am-i-seeing-receive-failed-disconnected
			// See https://github.com/skroutz/rafka/pull/90
			if strings.Contains(ev.String(), "Connection reset by peer") {
				continue
			}
			atomic.AddUint64(&stats.producerUnknownEvents, 1)
		}
	}
}