# Used in some log events to identify the source
id = Upstream_17
# Network interface name; used in the MTU code
nic = lo0
# UDP target (downstream; use a static ARP entry and route to be able to send packets over a diode)
# targetIP = kafka-downstream.sitia.nu
targetIP = localhost
# UDP target port
targetPort = 1234
# Kafka source. If more than one server, separate them with a comma (,)
source = kafka
bootstrapServers = kafka-upstream.sitia.nu:9094,kafka-upstream.sitia.nu:8094
# Topic to read
topic = transfer
# Kafka group id to use. If several threads are used, this is prepended to the thread names.
groupID = 17
# Log verbosity (e.g. info)
logLevel = info
# Read from this time instead of starting at the end
# Example: 2024-01-28T10:24:55+01:00
from =
# For testing you can use random; otherwise use kafka
# source = random
# NOTE: "source" is already set to kafka above; the duplicate assignment is kept
# commented out because duplicate-key handling is parser-dependent.
# source = kafka
# Encrypt the payload using the downstream public key below
encryption = false
# Downstream public key file
publicKeyFile = certs/server2.pem
# Every n seconds, generate a new symmetric key
generateNewSymmetricKeyEvery = 50
# Maximum payload size in bytes ("auto" presumably derives it from the NIC MTU — verify)
# payloadSize = auto
payloadSize = 1500
# After reading the config, where should we send the logs? Default is stdout.
# logFileName = ./tmp/upstream.log
# Format: a JSON list of {"thread_name": offset_in_seconds} entries
# sendingThreads = [{"now": 0}, {"3minutes": -10}]
sendingThreads = [{"No-delay": 0}]
# TLS to Kafka
# Certificate file
certFile = certs/tmp/airgap-upstream.crt
# Key file
keyFile = certs/tmp/airgap-upstream.key
# CA file
caFile = certs/tmp/kafka-ca.crt
43+
# To test the gap detection removing gaps, remove the filter-out for previously dropped packets.
# Deliver only the listed packet numbers, e.g. every even-numbered packet:
# deliverFilter = 2,4,6
# Compress the payload if an event is longer than this many bytes
compressWhenLengthExceeds = 1200
49+
# Interval for logging statistics (presumably seconds — verify against consumer)
logStatistics = 60
51+
# Events per second (presumably a rate limit — verify against consumer)
eps = 500