-
Notifications
You must be signed in to change notification settings - Fork 2
seaweedfs
SeaweedFS is a fast, highly scalable, distributed file system, ideal for a wide variety of applications that need small to large files available anytime, anywhere. This is the key piece that can be combined with Overlord.
A Makejail is available for SeaweedFS; however, due to the flexible nature of this distributed file system the Makejail does not implement a use case, but that is not a problem as we will automate everything.
metadata.yml:
kind: metadata
datacenters:
main:
entrypoint: !ENV '${ENTRYPOINT}'
access_token: !ENV '${TOKEN}'
deployIn:
labels:
- desktop
- r2
- carl
- centralita
metadata:
seaweedfs-master.sh: |
daemon -t "Distributed Object Store and Filesystem (master)" -p /weed/.pid -o /weed/.log \
/weed/weed master -ip=${MASTER_IP} -peers=${MASTER_PEERS} -mdir=/data
seaweedfs-master.makejail: |
INCLUDE gh+DtxdF/efficient-makejail
INCLUDE gh+AppJail-makejails/seaweedfs
COPY ${OVERLORD_METADATA}/seaweedfs-master.sh /seaweedfs-master.sh
CMD chmod +x /seaweedfs-master.sh
STOP
STAGE start
USER seaweedfs
WORKDIR /weed
RUN /seaweedfs-master.sh
seaweedfs-volume.sh: |
daemon -t "Distributed Object Store and Filesystem (volume)" -p /weed/.pid -o /weed/.log \
/weed/weed volume -dataCenter=${VOLUME_DATACENTER} -rack=${VOLUME_RACK} -ip=${VOLUME_IP} -mserver=${VOLUME_PEERS} -dir=/data
seaweedfs-volume.makejail: |
INCLUDE gh+DtxdF/efficient-makejail
INCLUDE gh+AppJail-makejails/seaweedfs
COPY ${OVERLORD_METADATA}/seaweedfs-volume.sh /seaweedfs-volume.sh
CMD chmod +x /seaweedfs-volume.sh
STOP
STAGE start
USER seaweedfs
WORKDIR /weed
RUN /seaweedfs-volume.sh
seaweedfs-filer.sh: |
daemon -t "Distributed Object Store and Filesystem (filer)" -p /weed/.pid -o /weed/.log \
./weed filer -defaultReplicaPlacement=100 -dataCenter=${FILER_DATACENTER} -ip=${FILER_IP} -master=${FILER_PEERS} -rack=${FILER_RACK} -port 8889
seaweedfs-filer.makejail: |
INCLUDE gh+DtxdF/efficient-makejail
INCLUDE gh+AppJail-makejails/seaweedfs
COPY ${OVERLORD_METADATA}/seaweedfs-filer.sh /seaweedfs-filer.sh
CMD chmod +x /seaweedfs-filer.sh
COPY ${OVERLORD_METADATA}/seaweedfs-filer.toml /weed/filer.toml
STOP
STAGE start
USER seaweedfs
WORKDIR /weed
RUN /seaweedfs-filer.sh
seaweedfs-filer.toml: |
# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file to one of the location, with descending priority
# ./filer.toml
# $HOME/.seaweedfs/filer.toml
# /etc/seaweedfs/filer.toml
####################################################
# Customizable filer server options
####################################################
[filer.options]
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
#max_file_name_length = 255
####################################################
# The following are filer store options
####################################################
[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = false
dir = "./filerldb2" # directory to store level db files
[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files
[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files
[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db" # sqlite db file
[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS `filemeta` (
# `dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
# `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name',
# `directory` TEXT NOT NULL COMMENT 'full path to parent directory',
# `meta` LONGBLOB,
# PRIMARY KEY (`dirhash`, `name`)
# ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
enabled = false
# dsn will take priority over "hostname, port, username, password, database".
# [username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
dsn = "root@tcp(localhost:3306)/seaweedfs?collation=utf8mb4_bin"
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
[mysql2] # or memsql, tidb
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS `%s` (
`dirhash` BIGINT NOT NULL,
`name` VARCHAR(766) NOT NULL,
`directory` TEXT NOT NULL,
`meta` LONGBLOB,
PRIMARY KEY (`dirhash`, `name`)
) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
# name VARCHAR(65535),
# directory VARCHAR(65535),
# meta bytea,
# PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""
[postgres2]
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS "%s" (
dirhash BIGINT,
name VARCHAR(65535),
directory VARCHAR(65535),
meta bytea,
PRIMARY KEY (dirhash, name)
);
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""
[cassandra2]
# CREATE TABLE filemeta (
# dirhash bigint,
# directory varchar,
# name varchar,
# meta blob,
# PRIMARY KEY ((dirhash, directory), name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
"localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""
# Gocql connection timeout, default: 600ms
connection_timeout_millisecond = 600
[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"
[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
enable_tls = false
ca_cert_path = ""
client_cert_path = ""
client_key_path = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis2_sentinel]
enabled = false
addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0
enable_tls = false
ca_cert_path = ""
client_cert_path = ""
client_key_path = ""
[redis_cluster2]
enabled = false
addresses = [
"localhost:30001",
"localhost:30002",
"localhost:30003",
"localhost:30004",
"localhost:30005",
"localhost:30006",
]
password = ""
enable_tls = false
ca_cert_path = ""
client_cert_path = ""
client_key_path = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
# The following lua redis stores uses lua to ensure atomicity
[redis_lua]
enabled = false
address = "localhost:6379"
password = ""
database = 0
enable_tls = false
ca_cert_path = ""
client_cert_path = ""
client_key_path = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis_lua_sentinel]
enabled = false
addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0
enable_tls = false
ca_cert_path = ""
client_cert_path = ""
client_key_path = ""
[redis_lua_cluster]
enabled = false
addresses = [
"localhost:30001",
"localhost:30002",
"localhost:30003",
"localhost:30004",
"localhost:30005",
"localhost:30006",
]
password = ""
enable_tls = false
ca_cert_path = ""
client_cert_path = ""
client_key_path = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[etcd]
enabled = true
servers = "100.65.139.52:2379,100.109.0.125:2379,100.96.18.2:2379"
username = ""
password = ""
key_prefix = "seaweedfs."
timeout = "3s"
# Set the CA certificate path
tls_ca_file=""
# Set the client certificate path
tls_client_crt_file=""
# Set the client private key path
tls_client_key_file=""
[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
username = ""
password = ""
ssl = false
ssl_ca_file = ""
ssl_cert_file = ""
ssl_key_file = ""
insecure_skip_verify = false
option_pool_size = 0
database = "seaweedfs"
[elastic7]
enabled = false
servers = [
"http://localhost1:9200",
"http://localhost2:9200",
"http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increase the value is recommend, be sure the value in Elastic is greater or equal here
index.max_result_window = 10000
[arangodb] # in development, don't use it
enabled = false
db_name = "seaweedfs"
servers=["http://localhost:8529"] # list of servers to connect to
# only basic auth supported for now
username=""
password=""
# skip tls cert validation
insecure_skip_verify = true
[ydb] # https://ydb.tech/
enabled = false
dsn = "grpc://localhost:2136?database=/local"
prefix = "seaweedfs"
useBucketPrefix = true # Fast Bucket Deletion
poolSizeLimit = 50
dialTimeOut = 10
# Authenticate produced with one of next environment variables:
# YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS=<path/to/sa_key_file> — used service account key file by path
# YDB_ANONYMOUS_CREDENTIALS="1" — used for authenticate with anonymous access. Anonymous access needs for connect to testing YDB installation
# YDB_METADATA_CREDENTIALS="1" — used metadata service for authenticate to YDB from yandex cloud virtual machine or from yandex function
# YDB_ACCESS_TOKEN_CREDENTIALS=<access_token> — used for authenticate to YDB with short-life access token. For example, access token may be IAM token
##########################
##########################
# To add path-specific filer store:
#
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra2.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
# Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################
[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1
[tikv]
enabled = false
# If you have many pd address, use ',' split then:
# pdaddrs = "pdhost1:2379, pdhost2:2379, pdhost3:2379"
pdaddrs = "localhost:2379"
# Concurrency for TiKV delete range
deleterange_concurrency = 1
# Enable 1PC
enable_1pc = false
# Set the CA certificate path
ca_path=""
# Set the certificate path
cert_path=""
# Set the private key path
key_path=""
# The name list used to verify the cn name
verify_cn=""
[tarantool]
address = "localhost:3301"
user = "guest"
password = ""
timeout = "5s"
maxReconnects = 1000
seaweedfs-mount-template.conf: |
exec.start: "/bin/sh /etc/rc"
exec.stop: "/bin/sh /etc/rc.shutdown jail"
mount.devfs
persist
allow.mount
allow.mount.fusefs
enforce_statfs: 1
seaweedfs-mount.sh: |
daemon -t "Distributed Object Store and Filesystem (mount)" -p /weed/.pid -o /weed/.log \
/weed/weed mount -dir=/mnt -filer=${MOUNT_FILERS}
seaweedfs-mount.makejail: |
OPTION mount_devfs
OPTION device=include \$devfsrules_hide_all
OPTION device=include \$devfsrules_unhide_basic
OPTION device=include \$devfsrules_unhide_login
OPTION device=path fuse unhide
OPTION template=${OVERLORD_METADATA}/seaweedfs-mount-template.conf
INCLUDE gh+DtxdF/efficient-makejail
INCLUDE gh+AppJail-makejails/seaweedfs
COPY ${OVERLORD_METADATA}/seaweedfs-mount.sh /seaweedfs-mount.sh
CMD chmod +x /seaweedfs-mount.sh
STOP
STAGE start
RUN /seaweedfs-mount.sh
The metadata will be stored in several chains. Not all chains will use all the metadata we define, but for convenience we will include them all in a single deployment file.
Although it contains dozens of lines, it is very easy to understand. Each Makejail will copy a script from the host to the jail and run them in the start stage using RUN which will allow us to define environment variables from the Director file that we will see later.
For the case of the filer, its configuration file is included which is configured to use an Etcd cluster. Feel free to use whatever you want, but remember that all filers must use the same backend.
Last but not least, the mount operation is important if we want to mount the file system. It will be mounted in the /mnt directory inside the jail and as this operation requires FUSE, the device is unhidden. We also need a template to use FUSE correctly. You can exclude this part if you prefer to perform this operation from the host (e.g.: from the fstab(5) file) or you will simply use SeaweedFS otherwise.
overlord apply -f metadata.yml
After deploying the necessary metadata, it is the master's turn. SeaweedFS can be deployed using a single process that implements all the components (master, volume, filer, ...), which is fine for simple use cases, but in our case we want to use all the machines we have located in different parts of our country.
master.yml
kind: directorProject
datacenters:
main:
entrypoint: !ENV '${ENTRYPOINT}'
access_token: !ENV '${TOKEN}'
deployIn:
labels:
- desktop
- r2
- carl
projectName: seaweedfs-master
projectFile: |
options:
- alias:
- ip4_inherit:
services:
master:
makejail: !ENV '${OVERLORD_METADATA}/seaweedfs-master.makejail'
start-environment:
- MASTER_IP: !ENV '${MASTER_IP}'
- MASTER_PEERS: !ENV '${MASTER_PEERS}'
options:
- label: 'appjail.dns.alt-name:seaweedfs-master'
- label: 'overlord.skydns:1'
- label: 'overlord.skydns.group:seaweedfs-master'
- label: 'overlord.skydns.interface:tailscale0'
arguments:
- seaweedfs_tag: '14.2'
volumes:
- data: seaweedfs-data
default_volume_type: '<volumefs>'
volumes:
data:
device: '/var/appjail-volumes/seaweedfs/master'
environment:
MASTER_PEERS: '100.65.139.52:9333,100.109.0.125:9333,100.126.247.98:9333'
labelsEnvironment:
desktop:
MASTER_IP: '100.65.139.52'
r2:
MASTER_IP: '100.109.0.125'
carl:
MASTER_IP: '100.126.247.98'
As you can see, the hard part is the metadata, but after that defining the deployment file is very easy and consumes only a few lines. In the above deployment file we will deploy the master in three chains. As with Etcd, SeaweedFS uses Raft, so keep in mind the "odd is better" rule for these cases.
$ overlord apply -f master.yml
$ overlord get-info -f master -t projects --filter-per-project
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: None
labels:
- all
- desktop
- services
- vm-only
projects:
seaweedfs-master:
state: DONE
last_log: 2025-05-23_18h51m54s
locked: False
services:
- {'name': 'master', 'status': 0, 'jail': 'b66003256b'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 2 minutes and 50.83 seconds
job_id: 4
restarted: False
labels:
error: False
message: None
load-balancer:
services:
master:
error: False
message: None
skydns:
services:
master:
error: False
message: (project:seaweedfs-master, service:master, records:[address:True,ptr:None,srv:None] records has been updated.
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: provider.carl
labels:
- all
- carl
- services
projects:
seaweedfs-master:
state: DONE
last_log: 2025-05-23_18h51m53s
locked: False
services:
- {'name': 'master', 'status': 0, 'jail': '7ee95b5447'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 2 minutes and 41.26 seconds
job_id: 43
restarted: False
labels:
error: False
message: None
load-balancer:
services:
master:
error: False
message: None
skydns:
services:
master:
error: False
message: (project:seaweedfs-master, service:master, records:[address:True,ptr:None,srv:None] records has been updated.
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: r2
labels:
- all
- r2
- services
projects:
seaweedfs-master:
state: DONE
last_log: 2025-05-23_18h51m56s
locked: False
services:
- {'name': 'master', 'status': 0, 'jail': '1ac3dc7115'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 29.9 seconds
job_id: 32
restarted: False
labels:
error: False
message: None
load-balancer:
services:
master:
error: False
message: None
skydns:
services:
master:
error: False
message: (project:seaweedfs-master, service:master, records:[address:True,ptr:None,srv:None] records has been updated.
It's time to deploy the volume. The master is a lightweight part of SeaweedFS and I have deployed it on some machines with low resources and it works fine; however, the volume is a crucial part. You can deploy it on machines with slow storage, but it is recommended to deploy it on the fastest one you have. In my case the volume will be deployed on chains that have SSDs.
volume.yml
kind: directorProject
datacenters:
main:
entrypoint: !ENV '${ENTRYPOINT}'
access_token: !ENV '${TOKEN}'
deployIn:
labels:
- desktop
- carl
projectName: seaweedfs-volume
projectFile: |
options:
- alias:
- ip4_inherit:
services:
volume:
makejail: !ENV '${OVERLORD_METADATA}/seaweedfs-volume.makejail'
start-environment:
- VOLUME_IP: !ENV '${VOLUME_IP}'
- VOLUME_PEERS: !ENV '${VOLUME_PEERS}'
- VOLUME_DATACENTER: !ENV '${VOLUME_DATACENTER}'
- VOLUME_RACK: !ENV '${VOLUME_RACK}'
options:
- label: 'appjail.dns.alt-name:seaweedfs-volume'
- label: 'overlord.skydns:1'
- label: 'overlord.skydns.group:seaweedfs-volume'
- label: 'overlord.skydns.interface:tailscale0'
arguments:
- seaweedfs_tag: '14.2'
volumes:
- data: seaweedfs-data
default_volume_type: '<volumefs>'
volumes:
data:
device: '/var/appjail-volumes/seaweedfs/volume'
environment:
VOLUME_PEERS: '100.65.139.52:9333,100.109.0.125:9333,100.126.247.98:9333'
labelsEnvironment:
desktop:
VOLUME_IP: '100.65.139.52'
VOLUME_DATACENTER: 'air'
VOLUME_RACK: 'rack001'
carl:
VOLUME_IP: '100.126.247.98'
VOLUME_DATACENTER: 'earth'
VOLUME_RACK: 'rack001'
console:
$ overlord apply -f volume.yml
$ overlord get-info -f volume.yml -t projects --filter-per-project
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: None
labels:
- all
- desktop
- services
- vm-only
projects:
seaweedfs-volume:
state: DONE
last_log: 2025-05-23_19h07m05s
locked: False
services:
- {'name': 'volume', 'status': 0, 'jail': '93ab2b7d5a'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 2 minutes and 7.21 seconds
job_id: 5
restarted: False
labels:
error: False
message: None
load-balancer:
services:
volume:
error: False
message: None
skydns:
services:
volume:
error: False
message: (project:seaweedfs-volume, service:volume, records:[address:True,ptr:None,srv:None] records has been updated.
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: provider.carl
labels:
- all
- carl
- services
projects:
seaweedfs-volume:
state: DONE
last_log: 2025-05-23_19h07m04s
locked: False
services:
- {'name': 'volume', 'status': 0, 'jail': '4e93f1d391'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 1 minute and 50.21 seconds
job_id: 44
restarted: False
labels:
error: False
message: None
load-balancer:
services:
volume:
error: False
message: None
skydns:
services:
volume:
error: False
message: (project:seaweedfs-volume, service:volume, records:[address:True,ptr:None,srv:None] records has been updated.
The filer provides the convenient abstraction to perform common operations that normally appear in file systems. For the next deployment this will become much more important.
filer.yml:
kind: directorProject
datacenters:
main:
entrypoint: !ENV '${ENTRYPOINT}'
access_token: !ENV '${TOKEN}'
deployIn:
labels:
- centralita
- r2
projectName: seaweedfs-filer
projectFile: |
options:
- alias:
- ip4_inherit:
services:
filer:
makejail: !ENV '${OVERLORD_METADATA}/seaweedfs-filer.makejail'
start-environment:
- FILER_IP: !ENV '${FILER_IP}'
- FILER_PEERS: !ENV '${FILER_PEERS}'
- FILER_DATACENTER: !ENV '${FILER_DATACENTER}'
- FILER_RACK: !ENV '${FILER_RACK}'
options:
- label: 'appjail.dns.alt-name:seaweedfs-filer'
- label: 'overlord.skydns:1'
- label: 'overlord.skydns.group:seaweedfs-filer'
- label: 'overlord.skydns.interface:tailscale0'
arguments:
- seaweedfs_tag: '14.2'
environment:
FILER_PEERS: '100.65.139.52:9333,100.109.0.125:9333,100.126.247.98:9333'
labelsEnvironment:
centralita:
FILER_IP: '100.96.18.2'
FILER_DATACENTER: 'earth'
FILER_RACK: 'rack001'
r2:
FILER_IP: '100.109.0.125'
FILER_DATACENTER: 'air'
FILER_RACK: 'rack001'
console:
$ overlord apply -f filer.yml
$ overlord get-info -f filer.yml -t projects --filter-per-project
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: centralita
labels:
- all
- centralita
- services
projects:
seaweedfs-filer:
state: DONE
last_log: 2025-05-23_19h17m28s
locked: False
services:
- {'name': 'filer', 'status': 0, 'jail': '9010cbea14'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 2 minutes and 5.04 seconds
job_id: 51
restarted: False
labels:
error: False
message: None
load-balancer:
services:
filer:
error: False
message: None
skydns:
services:
filer:
error: False
message: (project:seaweedfs-filer, service:filer, records:[address:True,ptr:None,srv:None] records has been updated.
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: r2
labels:
- all
- r2
- services
projects:
seaweedfs-filer:
state: DONE
last_log: 2025-05-23_19h17m30s
locked: False
services:
- {'name': 'filer', 'status': 0, 'jail': 'af6f9461fc'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 32.5 seconds
job_id: 33
restarted: False
labels:
error: False
message: None
load-balancer:
services:
filer:
error: False
message: None
skydns:
services:
filer:
error: False
message: (project:seaweedfs-filer, service:filer, records:[address:True,ptr:None,srv:None] records has been updated.
The next operation we will need to do is to mount the file system using the filers. This can be done on each chain that requires writing to the mounted file system. This is very convenient in combination with volumes created by AppJail.
mount.yml:
kind: directorProject
datacenters:
main:
entrypoint: !ENV '${ENTRYPOINT}'
access_token: !ENV '${TOKEN}'
deployIn:
labels:
- desktop
- r2
- centralita
- carl
projectName: seaweedfs-mount
projectFile: |
options:
- alias:
- ip4_inherit:
services:
mount:
name: 'seaweedfs-mount'
makejail: !ENV '${OVERLORD_METADATA}/seaweedfs-mount.makejail'
start-environment:
- MOUNT_FILERS: !ENV '${MOUNT_FILERS}'
arguments:
- seaweedfs_tag: '14.2'
environment:
MOUNT_FILERS: '100.96.18.2:8889,100.109.0.125:8889'
Unlike other deployments, in this case we have specified the name of the jail. This is important if we want to mount volumes from other jails, as we will see later.
$ overlord apply -f mount.yml
$ overlord get-info -f mount.yml -t projects --filter-per-project
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: None
labels:
- all
- desktop
- services
- vm-only
projects:
seaweedfs-mount:
state: DONE
last_log: 2025-05-23_19h45m34s
locked: False
services:
- {'name': 'mount', 'status': 0, 'jail': 'seaweedfs-mount'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 3 minutes and 21.84 seconds
job_id: 8
restarted: False
labels:
error: False
message: None
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: provider.carl
labels:
- all
- carl
- services
projects:
seaweedfs-mount:
state: DONE
last_log: 2025-05-23_19h45m33s
locked: False
services:
- {'name': 'mount', 'status': 0, 'jail': 'seaweedfs-mount'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 3 minutes and 11.9 seconds
job_id: 47
restarted: False
labels:
error: False
message: None
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: centralita
labels:
- all
- centralita
- services
projects:
seaweedfs-mount:
state: DONE
last_log: 2025-05-23_19h45m35s
locked: False
services:
- {'name': 'mount', 'status': 0, 'jail': 'seaweedfs-mount'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 2 minutes and 38.61 seconds
job_id: 54
restarted: False
labels:
error: False
message: None
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: r2
labels:
- all
- r2
- services
projects:
seaweedfs-mount:
state: DONE
last_log: 2025-05-23_19h45m37s
locked: False
services:
- {'name': 'mount', 'status': 0, 'jail': 'seaweedfs-mount'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 44.46 seconds
job_id: 36
restarted: False
labels:
error: False
message: None
All this walking around is worth it. Now we can deploy jails in different chains, but from the user's point of view it looks like a single application.
app.yml:
kind: directorProject
datacenters:
main:
entrypoint: !ENV '${ENTRYPOINT}'
access_token: !ENV '${TOKEN}'
deployIn:
labels:
- r2
- carl
projectName: simple-web-app
projectFile: |
options:
- virtualnet: ':<random> default'
- nat:
services:
darkhttpd:
makejail: 'gh+AppJail-makejails/darkhttpd'
options:
- priority: 10
- expose: '9281:80 ext_if:tailscale0 on_if:tailscale0'
- label: 'appjail.dns.alt-name:simple-web-app'
- label: 'overlord.skydns:1'
- label: 'overlord.skydns.group:simple-web-app'
- label: 'overlord.skydns.interface:tailscale0'
arguments:
- darkhttpd_tag: 14.2
volumes:
- wwwdir: /usr/local/www/darkhttpd
volumes:
wwwdir:
device: !ENV '${DFS}/appjail-volumes/simple-web-app'
environment:
DFS: /usr/local/appjail/jails/seaweedfs-mount/jail/mnt
Note the priority option: this is important for applications that want to use the mounted file system, as the seaweedfs-mount jail must be started first. Also note that any jail using SeaweedFS via the mounted file system may be affected if you remount the file system or restart the seaweedfs-mount jail. This can be minimized by mounting the SeaweedFS file system from the host, so that any changes (for example, to the fstab(5) file) take effect after a reboot. Choose the poison you prefer.
$ overlord apply -f app.yml
$ overlord get-info -f app.yml -t projects --filter-per-project
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: provider.carl
labels:
- all
- carl
- services
projects:
simple-web-app:
state: DONE
last_log: 2025-05-23_20h40m35s
locked: False
services:
- {'name': 'darkhttpd', 'status': 0, 'jail': '89a9a70cc1'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 6 minutes and 52.58 seconds
job_id: 48
restarted: False
labels:
error: False
message: None
load-balancer:
services:
darkhttpd:
error: False
message: None
skydns:
services:
darkhttpd:
error: False
message: (project:simple-web-app, service:darkhttpd, records:[address:True,ptr:None,srv:None] records has been updated.
datacenter: http://127.0.0.1:8888
entrypoint: main
chain: r2
labels:
- all
- r2
- services
projects:
simple-web-app:
state: DONE
last_log: 2025-05-23_20h40m39s
locked: False
services:
- {'name': 'darkhttpd', 'status': 0, 'jail': 'b591310b6c'}
up:
operation: COMPLETED
output:
rc: 0
stdout: {'errlevel': 0, 'message': None, 'failed': []}
last_update: 1 minute and 53.72 seconds
job_id: 37
restarted: False
labels:
error: False
message: None
load-balancer:
services:
darkhttpd:
error: False
message: None
skydns:
services:
darkhttpd:
error: False
message: (project:simple-web-app, service:darkhttpd, records:[address:True,ptr:None,srv:None] records has been updated.
console:
# echo "Hello, world!" > /usr/local/appjail/jails/seaweedfs-mount/jail/mnt/appjail-volumes/simple-web-app/index.html
$ host simple-web-app.overlord.lan
simple-web-app.overlord.lan has address 100.126.247.98
simple-web-app.overlord.lan has address 100.109.0.125
$ curl http://simple-web-app.overlord.lan:9281/
Hello, world!
$ curl http://100.126.247.98:9281/
Hello, world!
$ curl http://100.109.0.125:9281/
Hello, world!