Skip to content

Commit 298d70e

Browse files
committed
feat: add apple-container runtime support
Add Apple Containers as an alternative runtime for Supabase CLI-managed services. Docker remains the default runtime, while apple-container can be selected anywhere the CLI manages the service lifecycle. The primary use case today is local development, but the runtime abstraction is not limited to that environment.
1 parent 24b7304 commit 298d70e

29 files changed

Lines changed: 2873 additions & 379 deletions

cmd/root.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -202,7 +202,7 @@ func recoverAndExit() {
202202
!viper.GetBool("DEBUG") {
203203
utils.CmdSuggestion = utils.SuggestDebugFlag
204204
}
205-
if e, ok := err.(*errors.Error); ok && len(utils.Version) == 0 {
205+
if e, ok := err.(*errors.Error); ok && viper.GetBool("DEBUG") {
206206
fmt.Fprintln(os.Stderr, string(e.Stack()))
207207
}
208208
msg = err.Error()
@@ -239,6 +239,7 @@ func init() {
239239
flags.String("workdir", "", "path to a Supabase project directory")
240240
flags.Bool("experimental", false, "enable experimental features")
241241
flags.String("network-id", "", "use the specified docker network instead of a generated one")
242+
flags.String("runtime", "", "container runtime for local development (docker|apple-container)")
242243
flags.String("profile", "supabase", "use a specific profile for connecting to Supabase API")
243244
flags.VarP(&utils.OutputFormat, "output", "o", "output format of status variables")
244245
flags.Var(&utils.DNSResolver, "dns-resolver", "lookup domain names using the specified resolver")

docs/supabase/start.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,8 @@ Starts the Supabase local development stack.
44

55
Requires `supabase/config.toml` to be created in your current working directory by running `supabase init`.
66

7+
Use `--runtime` to override the local container runtime for the current command. To make it persistent for the project, set `[local].runtime` in `supabase/config.toml`.
8+
79
All service containers are started by default. You can exclude those not needed by passing in `-x` flag. To exclude multiple containers, either pass in a comma separated string, such as `-x gotrue,imgproxy`, or specify `-x` flag multiple times.
810

911
> It is recommended to have at least 7GB of RAM to start all services.

docs/supabase/status.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,4 +4,6 @@ Shows status of the Supabase local development stack.
44

55
Requires the local development stack to be started by running `supabase start` or `supabase db start`.
66

7+
The pretty output includes a runtime summary with the selected local runtime, project ID, and tracked containers, networks, and volumes.
8+
79
You can export the connection parameters for [initializing supabase-js](https://supabase.com/docs/reference/javascript/initializing) locally by specifying the `-o env` flag. Supported parameters include `JWT_SECRET`, `ANON_KEY`, and `SERVICE_ROLE_KEY`.

docs/supabase/stop.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,6 @@ Stops the Supabase local development stack.
44

55
Requires `supabase/config.toml` to be created in your current working directory by running `supabase init`.
66

7-
All Docker resources are maintained across restarts. Use `--no-backup` flag to reset your local development data between restarts.
7+
Local container resources are maintained across restarts for both the `docker` and `apple-container` runtimes. Use the `--no-backup` flag to reset your local development data between restarts.
88

9-
Use the `--all` flag to stop all local Supabase projects instances on the machine. Use with caution with `--no-backup` as it will delete all supabase local projects data.
9+
Use the `--all` flag to stop all local Supabase project instances on the machine. Use with caution with `--no-backup` as it will delete all local Supabase project data.

internal/db/reset/reset.go

Lines changed: 91 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -5,31 +5,47 @@ import (
55
_ "embed"
66
"fmt"
77
"io"
8+
"net"
89
"os"
910
"strconv"
1011
"strings"
1112
"time"
1213

1314
"github.com/cenkalti/backoff/v4"
1415
"github.com/containerd/errdefs"
15-
"github.com/docker/docker/api/types"
16-
"github.com/docker/docker/api/types/container"
1716
"github.com/docker/docker/api/types/network"
1817
"github.com/go-errors/errors"
1918
"github.com/jackc/pgconn"
2019
"github.com/jackc/pgerrcode"
2120
"github.com/jackc/pgx/v4"
2221
"github.com/spf13/afero"
23-
"github.com/supabase/cli/internal/db/start"
22+
dbstart "github.com/supabase/cli/internal/db/start"
2423
"github.com/supabase/cli/internal/migration/apply"
2524
"github.com/supabase/cli/internal/migration/down"
2625
"github.com/supabase/cli/internal/migration/list"
2726
"github.com/supabase/cli/internal/migration/repair"
2827
"github.com/supabase/cli/internal/seed/buckets"
28+
stackstart "github.com/supabase/cli/internal/start"
2929
"github.com/supabase/cli/internal/utils"
3030
"github.com/supabase/cli/pkg/migration"
3131
)
3232

33+
var (
34+
assertSupabaseDbIsRunning = utils.AssertSupabaseDbIsRunning
35+
removeContainer = utils.RemoveContainer
36+
removeVolume = utils.RemoveVolume
37+
startContainer = utils.DockerStart
38+
inspectContainer = utils.InspectContainer
39+
restartContainer = utils.RestartContainer
40+
waitForHealthyService = dbstart.WaitForHealthyService
41+
waitForLocalDatabase = waitForDatabaseReady
42+
waitForLocalAPI = waitForAPIReady
43+
setupLocalDatabase = dbstart.SetupLocalDatabase
44+
restartKong = stackstart.RestartKong
45+
runBucketSeed = buckets.Run
46+
seedBuckets = seedBucketsWithRetry
47+
)
48+
3349
func Run(ctx context.Context, version string, last uint, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {
3450
if len(version) > 0 {
3551
if _, err := strconv.Atoi(version); err != nil {
@@ -54,21 +70,38 @@ func Run(ctx context.Context, version string, last uint, config pgconn.Config, f
5470
return resetRemote(ctx, version, config, fsys, options...)
5571
}
5672
// Config file is loaded before parsing --linked or --local flags
57-
if err := utils.AssertSupabaseDbIsRunning(); err != nil {
73+
if err := assertSupabaseDbIsRunning(); err != nil {
5874
return err
5975
}
6076
// Reset postgres database because extensions (pg_cron, pg_net) require postgres
6177
if err := resetDatabase(ctx, version, fsys, options...); err != nil {
6278
return err
6379
}
6480
// Seed objects from supabase/buckets directory
65-
if resp, err := utils.Docker.ContainerInspect(ctx, utils.StorageId); err == nil {
66-
if resp.State.Health == nil || resp.State.Health.Status != types.Healthy {
67-
if err := start.WaitForHealthyService(ctx, 30*time.Second, utils.StorageId); err != nil {
81+
if _, err := inspectContainer(ctx, utils.StorageId); err == nil {
82+
if shouldRefreshAPIAfterReset() {
83+
// Kong caches upstream addresses; recreate it after the db container gets a new IP.
84+
if err := restartKong(ctx, stackstart.KongDependencies{
85+
Gotrue: utils.Config.Auth.Enabled,
86+
Rest: utils.Config.Api.Enabled,
87+
Realtime: utils.Config.Realtime.Enabled,
88+
Storage: utils.Config.Storage.Enabled,
89+
Studio: utils.Config.Studio.Enabled,
90+
Pgmeta: utils.Config.Studio.Enabled,
91+
Edge: true,
92+
Logflare: utils.Config.Analytics.Enabled,
93+
Pooler: utils.Config.Db.Pooler.Enabled,
94+
}); err != nil {
95+
return err
96+
}
97+
if err := waitForLocalAPI(ctx, 30*time.Second); err != nil {
6898
return err
6999
}
70100
}
71-
if err := buckets.Run(ctx, "", false, fsys); err != nil {
101+
if err := waitForHealthyService(ctx, 30*time.Second, utils.StorageId); err != nil {
102+
return err
103+
}
104+
if err := seedBuckets(ctx, fsys); err != nil {
72105
return err
73106
}
74107
}
@@ -77,6 +110,10 @@ func Run(ctx context.Context, version string, last uint, config pgconn.Config, f
77110
return nil
78111
}
79112

113+
func shouldRefreshAPIAfterReset() bool {
114+
return utils.UsesAppleContainerRuntime() && utils.Config.Api.Enabled
115+
}
116+
80117
func resetDatabase(ctx context.Context, version string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {
81118
fmt.Fprintln(os.Stderr, "Resetting local database"+toLogMessage(version))
82119
if utils.Config.Db.MajorVersion <= 14 {
@@ -111,14 +148,14 @@ func resetDatabase14(ctx context.Context, version string, fsys afero.Fs, options
111148
}
112149

113150
func resetDatabase15(ctx context.Context, version string, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {
114-
if err := utils.Docker.ContainerRemove(ctx, utils.DbId, container.RemoveOptions{Force: true}); err != nil {
151+
if err := removeContainer(ctx, utils.DbId, true, true); err != nil {
115152
return errors.Errorf("failed to remove container: %w", err)
116153
}
117-
if err := utils.Docker.VolumeRemove(ctx, utils.DbId, true); err != nil {
154+
if err := removeVolume(ctx, utils.DbId, true); err != nil {
118155
return errors.Errorf("failed to remove volume: %w", err)
119156
}
120-
config := start.NewContainerConfig()
121-
hostConfig := start.NewHostConfig()
157+
config := dbstart.NewContainerConfig()
158+
hostConfig := dbstart.NewHostConfig()
122159
networkingConfig := network.NetworkingConfig{
123160
EndpointsConfig: map[string]*network.EndpointSettings{
124161
utils.NetId: {
@@ -127,13 +164,16 @@ func resetDatabase15(ctx context.Context, version string, fsys afero.Fs, options
127164
},
128165
}
129166
fmt.Fprintln(os.Stderr, "Recreating database...")
130-
if _, err := utils.DockerStart(ctx, config, hostConfig, networkingConfig, utils.DbId); err != nil {
167+
if _, err := startContainer(ctx, config, hostConfig, networkingConfig, utils.DbId); err != nil {
131168
return err
132169
}
133-
if err := start.WaitForHealthyService(ctx, utils.Config.Db.HealthTimeout, utils.DbId); err != nil {
170+
if err := waitForHealthyService(ctx, utils.Config.Db.HealthTimeout, utils.DbId); err != nil {
134171
return err
135172
}
136-
if err := start.SetupLocalDatabase(ctx, version, fsys, os.Stderr, options...); err != nil {
173+
if err := waitForLocalDatabase(ctx, utils.Config.Db.HealthTimeout, options...); err != nil {
174+
return err
175+
}
176+
if err := setupLocalDatabase(ctx, version, fsys, os.Stderr, options...); err != nil {
137177
return err
138178
}
139179
fmt.Fprintln(os.Stderr, "Restarting containers...")
@@ -146,7 +186,7 @@ func initDatabase(ctx context.Context, options ...func(*pgx.ConnConfig)) error {
146186
return err
147187
}
148188
defer conn.Close(context.Background())
149-
return start.InitSchema14(ctx, conn)
189+
return dbstart.InitSchema14(ctx, conn)
150190
}
151191

152192
// Recreate postgres database by connecting to template1
@@ -193,7 +233,7 @@ func DisconnectClients(ctx context.Context, conn *pgx.Conn) error {
193233
}
194234
}
195235
// Wait for WAL senders to drop their replication slots
196-
policy := start.NewBackoffPolicy(ctx, 10*time.Second)
236+
policy := dbstart.NewBackoffPolicy(ctx, 10*time.Second)
197237
waitForDrop := func() error {
198238
var count int
199239
if err := conn.QueryRow(ctx, COUNT_REPLICATION_SLOTS).Scan(&count); err != nil {
@@ -211,20 +251,50 @@ func RestartDatabase(ctx context.Context, w io.Writer) error {
211251
fmt.Fprintln(w, "Restarting containers...")
212252
// Some extensions must be manually restarted after pg_terminate_backend
213253
// Ref: https://github.com/citusdata/pg_cron/issues/99
214-
if err := utils.Docker.ContainerRestart(ctx, utils.DbId, container.StopOptions{}); err != nil {
254+
if err := restartContainer(ctx, utils.DbId); err != nil {
215255
return errors.Errorf("failed to restart container: %w", err)
216256
}
217-
if err := start.WaitForHealthyService(ctx, utils.Config.Db.HealthTimeout, utils.DbId); err != nil {
257+
if err := waitForHealthyService(ctx, utils.Config.Db.HealthTimeout, utils.DbId); err != nil {
218258
return err
219259
}
220260
return restartServices(ctx)
221261
}
222262

263+
func waitForDatabaseReady(ctx context.Context, timeout time.Duration, options ...func(*pgx.ConnConfig)) error {
264+
policy := dbstart.NewBackoffPolicy(ctx, timeout)
265+
return backoff.Retry(func() error {
266+
conn, err := utils.ConnectLocalPostgres(ctx, pgconn.Config{}, options...)
267+
if err != nil {
268+
return err
269+
}
270+
return conn.Close(ctx)
271+
}, policy)
272+
}
273+
274+
func seedBucketsWithRetry(ctx context.Context, fsys afero.Fs) error {
275+
policy := dbstart.NewBackoffPolicy(ctx, 30*time.Second)
276+
return backoff.Retry(func() error {
277+
return runBucketSeed(ctx, "", false, fsys)
278+
}, policy)
279+
}
280+
281+
func waitForAPIReady(ctx context.Context, timeout time.Duration) error {
282+
addr := net.JoinHostPort(utils.Config.Hostname, strconv.FormatUint(uint64(utils.Config.Api.Port), 10))
283+
policy := dbstart.NewBackoffPolicy(ctx, timeout)
284+
return backoff.Retry(func() error {
285+
conn, err := net.DialTimeout("tcp", addr, time.Second)
286+
if err != nil {
287+
return err
288+
}
289+
return conn.Close()
290+
}, policy)
291+
}
292+
223293
func restartServices(ctx context.Context) error {
224294
// No need to restart PostgREST because it automatically reconnects and listens for schema changes
225295
services := listServicesToRestart()
226296
result := utils.WaitAll(services, func(id string) error {
227-
if err := utils.Docker.ContainerRestart(ctx, id, container.StopOptions{}); err != nil && !errdefs.IsNotFound(err) {
297+
if err := restartContainer(ctx, id); err != nil && !errdefs.IsNotFound(err) {
228298
return errors.Errorf("failed to restart %s: %w", id, err)
229299
}
230300
return nil
@@ -234,7 +304,7 @@ func restartServices(ctx context.Context) error {
234304
}
235305

236306
func listServicesToRestart() []string {
237-
return []string{utils.StorageId, utils.GotrueId, utils.RealtimeId, utils.PoolerId}
307+
return []string{utils.StorageId, utils.GotrueId, utils.RealtimeId, utils.PoolerId, utils.KongId}
238308
}
239309

240310
func resetRemote(ctx context.Context, version string, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error {

0 commit comments

Comments
 (0)