# docker-compose.yml — forked from letsencrypt/boulder
# 229 lines (217 loc) · 8.13 KB
services:
  boulder:
    # The `letsencrypt/boulder-tools:latest` tag is automatically built in local
    # dev environments. In CI a specific BOULDER_TOOLS_TAG is passed, and it is
    # pulled with `docker compose pull`.
    image: &boulder_tools_image letsencrypt/boulder-tools:${BOULDER_TOOLS_TAG:-latest}
    build:
      context: test/boulder-tools/
      # Should match one of the GO_CI_VERSIONS in test/boulder-tools/tag_and_upload.sh.
      args:
        # Quoted: version strings must stay strings to the YAML parser.
        GO_VERSION: "1.26.1"
    environment:
      # To solve HTTP-01 and TLS-ALPN-01 challenges, change the IP in FAKE_DNS
      # to the IP address where your ACME client's solver is listening. This is
      # pointing at the boulder service's "public" IP, where challtestsrv is.
      FAKE_DNS: 64.112.117.122
      BOULDER_CONFIG_DIR: test/config
      # Quoted: an unquoted `false` is a YAML boolean, but environment
      # variable values must be strings; the container sees literal "false".
      USE_VITESS: "false"
      GOCACHE: /boulder/.gocache/go-build
    volumes:
      - .:/boulder:cached
      - ./.gocache:/root/.cache/go-build:cached
      - ./test/certs/.softhsm-tokens/:/var/lib/softhsm/tokens/:cached
    networks:
      bouldernet:
        ipv4_address: 10.77.77.77
      publicnet:
        ipv4_address: 64.112.117.122
      publicnet2:
        ipv4_address: 64.112.117.134
    # Use consul as a backup to Docker's embedded DNS server. If there's a name
    # Docker's DNS server doesn't know about, it will forward the query to this
    # IP (running consul).
    # (https://docs.docker.com/config/containers/container-networking/#dns-services).
    # This is used to look up service names via A records (like ra.service.consul) that
    # are configured via the ServerAddress field of cmd.GRPCClientConfig.
    # TODO: Remove this when ServerAddress is deprecated in favor of SRV records
    # and DNSAuthority.
    dns: 10.77.77.10
    extra_hosts:
      # Allow the boulder container to be reached as "ca.example.org", so we
      # can put that name inside our integration test certs (e.g. as a crl
      # url) and have it look like a publicly-accessible name.
      # TODO(#8215): Move s3-test-srv to a separate service.
      - "ca.example.org:64.112.117.122"
      # Allow the boulder container to be reached as "integration.trust", for
      # similar reasons, but intended for use as a SAN rather than a CRLDP.
      # TODO(#8215): Move observer's probe target to a separate service.
      - "integration.trust:64.112.117.122"
    ports:
      # Quoted: Compose recommends quoting HOST:CONTAINER mappings so YAML
      # never mis-parses colon-separated digit pairs as numbers.
      - "4001:4001" # ACMEv2
      - "4003:4003" # SFE
    depends_on:
      bmariadb:
        condition: service_started
      bproxysql:
        condition: service_started
      bvitess:
        condition: service_healthy
      bredis_1:
        condition: service_started
      bredis_2:
        condition: service_started
      bconsul:
        condition: service_started
      bjaeger:
        condition: service_started
      bpkimetal:
        condition: service_started
    entrypoint: test/entrypoint.sh
    working_dir: &boulder_working_dir /boulder
  # One-shot setup container: runs test/certs/generate.sh using the same
  # boulder-tools image and volume mounts as the main boulder service.
  bsetup:
    image: *boulder_tools_image
    volumes:
      - .:/boulder:cached
      - ./.gocache:/root/.cache/go-build:cached
      - ./test/certs/.softhsm-tokens/:/var/lib/softhsm/tokens/:cached
    entrypoint: test/certs/generate.sh
    working_dir: *boulder_working_dir
    profiles:
      # Adding a profile to this container means that it won't be started by a
      # normal "docker compose up/run boulder", only when specifically invoked
      # with a "docker compose up bsetup".
      - setup
  # MariaDB database server. Files mounted into /docker-entrypoint-initdb.d
  # (presumably SQL schema/init scripts from ./sa/db — verify against that
  # directory) are executed by the image on first startup.
  bmariadb:
    image: mariadb:10.11.13
    volumes:
      - ./sa/db:/docker-entrypoint-initdb.d
    networks:
      bouldernet:
        aliases:
          # Stable DNS name other services use to reach this container.
          - boulder-mariadb
    environment:
      # Quoted on purpose: an unquoted `yes` would be parsed as a YAML 1.1
      # boolean instead of the literal string the image expects.
      MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
  # ProxySQL sits between boulder and bmariadb, configured from
  # /test/proxysql/proxysql.cnf (mounted from ./test/).
  bproxysql:
    image: proxysql/proxysql:2.7.2
    # The --initial flag force resets the ProxySQL database on startup. By
    # default, ProxySQL ignores new configuration if the database already
    # exists. Without this flag, new configuration wouldn't be applied until you
    # ran `docker compose down`.
    entrypoint: proxysql -f --idle-threads -c /test/proxysql/proxysql.cnf --initial
    volumes:
      - ./test/:/test/:cached
    depends_on:
      - bmariadb
    networks:
      bouldernet:
        aliases:
          # Stable DNS name other services use to reach this container.
          - boulder-proxysql
  # First of two Redis instances, both started from the same
  # /test/redis-ratelimits.config; distinguished only by static IP.
  bredis_1:
    image: redis:7.0.15
    volumes:
      - ./test/:/test/:cached
    command: redis-server /test/redis-ratelimits.config
    networks:
      bouldernet:
        # Static IP in the bottom half of bouldernet (outside the DHCP range).
        ipv4_address: 10.77.77.4
  # Second Redis instance; identical to bredis_1 except for its static IP.
  bredis_2:
    image: redis:7.0.15
    volumes:
      - ./test/:/test/:cached
    command: redis-server /test/redis-ratelimits.config
    networks:
      bouldernet:
        # Static IP in the bottom half of bouldernet (outside the DHCP range).
        ipv4_address: 10.77.77.5
  # Consul agent in dev mode, configured from /test/consul/config.hcl. The
  # boulder service points its fallback `dns:` at this container's static IP
  # (10.77.77.10) for *.service.consul lookups.
  bconsul:
    image: hashicorp/consul:1.19.2
    volumes:
      - ./test/:/test/:cached
    networks:
      bouldernet:
        # Must stay in sync with the `dns:` entry on the boulder service.
        ipv4_address: 10.77.77.10
    command: "consul agent -dev -config-format=hcl -config-file=/test/consul/config.hcl"
  # Jaeger all-in-one tracing backend; no static IP needed, reached by
  # service name on bouldernet.
  bjaeger:
    image: jaegertracing/all-in-one:1.50
    networks:
      - bouldernet
  # PKImetal certificate linter; reached by service name on bouldernet.
  bpkimetal:
    image: ghcr.io/pkimetal/pkimetal:v1.20.0
    networks:
      - bouldernet
  bvitess:
    # The `letsencrypt/boulder-vtcomboserver:latest` tag is automatically built
    # in local dev environments. In CI a specific BOULDER_VTCOMBOSERVER_TAG is
    # passed, and it is pulled with `docker compose pull`.
    image: letsencrypt/boulder-vtcomboserver:${BOULDER_VTCOMBOSERVER_TAG:-latest}
    build:
      context: test/vtcomboserver/
    volumes:
      - ./:/boulder/
    environment:
      # By specifying KEYSPACES vttestserver will create the corresponding
      # databases on startup.
      KEYSPACES: boulder_sa,incidents_sa,boulder_sa_next,incidents_sa_next
      # One shard per keyspace above; parsed by the image, not by YAML.
      NUM_SHARDS: 1,1,1,1
    healthcheck:
      # Make sure the service is up and the tables are created. Use `serials` because it happens
      # to be last in the SQL initialization files, so if it exists the other tables do too.
      # Note that the mysql command issues some queries on startup that result in this spurious
      # logging from bvitess:
      # You have an error in your SQL syntax; check the manual that corresponds to your MySQL
      # server version for the right syntax to use near '$$ from dual limit 10001'
      test: [ "CMD", "mysql", "-h", "127.0.0.1", "-P", "33577", "-D", "boulder_sa",
              "-e", "SELECT 1 FROM serials"]
      interval: 2s
      timeout: 30s
      retries: 3
      start_period: 10s
      start_interval: 2s
    networks:
      bouldernet:
        aliases:
          # Stable DNS name other services use to reach this container.
          - boulder-vitess
networks:
  # This network represents the data-center internal network. It is used for
  # boulder services and their infrastructure, such as consul, mariadb, and
  # redis.
  bouldernet:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 10.77.77.0/24
          # Only issue DHCP addresses in the top half of the range, to avoid
          # conflict with static addresses.
          ip_range: 10.77.77.128/25
  # This network represents the public internet. It uses a real public IP space
  # (that Let's Encrypt controls) so that our integration tests are happy to
  # validate and issue for it. It is used by challtestsrv, which binds to
  # 64.112.117.122:80 and :443 for its HTTP-01 challenge responder.
  #
  # TODO(#8215): Put s3-test-srv on this network.
  publicnet:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 64.112.117.0/25
  # This network is used for two things in the integration tests:
  # - challtestsrv binds to 64.112.117.134:443 for its tls-alpn-01 challenge
  #   responder, to avoid interfering with the HTTPS port used for testing
  #   HTTP->HTTPS redirects during http-01 challenges. Note: this could
  #   probably be updated in the future so that challtestsrv can handle
  #   both tls-alpn-01 and HTTPS on the same port.
  # - test/v2_integration.py has some test cases that start their own HTTP
  #   server instead of relying on challtestsrv, because they want very
  #   specific behavior. For these cases, v2_integration.py creates a Python
  #   HTTP server and binds it to 64.112.117.134:80.
  #
  # TODO(#8215): Deprecate this network, replacing it with individual IPs within
  # the existing publicnet.
  publicnet2:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 64.112.117.128/25