From 01256a1721f04e3bd8e032340a530870c7d67539 Mon Sep 17 00:00:00 2001 From: Han Qiao Date: Tue, 24 Sep 2024 17:19:42 +0800 Subject: [PATCH 01/28] feat: support remote config overrides (#2704) * feat: setup basics for branch config override * fix: use pointers for falsy values to determine emptyness * chore: refactor turn Auth.EnableSignup into pointer * wip: attemps non pointer approach * fix: use direct toml parsing to distinguish undefined values * chore: restore gomod * chore: refactor to a single function leverage mergo * chore: fix lint * fix: add branch override logic to LoadConfigFs * fix: inverted logic * chore: add env branch override test * chore: add test for slices merging * chore: remote config overrides * chore: validate project id * Apply suggestions from code review Co-authored-by: Andrew Valleteau --------- Co-authored-by: avallete Co-authored-by: Andrew Valleteau --- pkg/config/config.go | 57 +++++++++++++++++++++++++++++---- pkg/config/config_test.go | 35 ++++++++++++++++++++ pkg/config/testdata/config.toml | 14 ++++++++ 3 files changed, 99 insertions(+), 7 deletions(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index db441e9bc..18414546a 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "io/fs" + "maps" "net" "net/http" "net/url" @@ -119,7 +120,8 @@ func (c CustomClaims) NewToken() *jwt.Token { // // Default values for internal configs should be added to `var Config` initializer. type ( - config struct { + // Common config fields between our "base" config and any "remote" branch specific + baseConfig struct { ProjectId string `toml:"project_id"` Hostname string `toml:"-"` Api api `toml:"api"` @@ -135,6 +137,12 @@ type ( Experimental experimental `toml:"experimental" mapstructure:"-"` } + config struct { + baseConfig + Overrides map[string]interface{} `toml:"remotes"` + Remotes map[string]baseConfig `toml:"-"` + } + api struct { Enabled bool `toml:"enabled"` Image string `toml:"-"` @@ -438,6 +446,16 @@ type ( } ) +func (c *baseConfig) Clone() baseConfig { + copy := *c + copy.Storage.Buckets = maps.Clone(c.Storage.Buckets) + copy.Functions = maps.Clone(c.Functions) + copy.Auth.External = maps.Clone(c.Auth.External) + copy.Auth.Email.Template = maps.Clone(c.Auth.Email.Template) + copy.Auth.Sms.TestOTP = maps.Clone(c.Auth.Sms.TestOTP) + return copy +} + type ConfigEditor func(*config) func WithHostname(hostname string) ConfigEditor { @@ -447,7 +465,7 @@ func WithHostname(hostname string) ConfigEditor { } func NewConfig(editors ...ConfigEditor) config { - initial := config{ + initial := config{baseConfig: baseConfig{ Hostname: "127.0.0.1", Api: api{ Image: postgrestImage, @@ -543,7 +561,7 @@ func NewConfig(editors ...ConfigEditor) config { EdgeRuntime: edgeRuntime{ Image: edgeRuntimeImage, }, - } + }} for _, apply := range editors { apply(&initial) } @@ -587,7 +605,6 @@ func (c *config) Load(path string, fsys fs.FS) error { if _, err := dec.Decode(c); err != nil { return errors.Errorf("failed to decode config template: %w", err) } - // Load user defined config if metadata, err := toml.DecodeFS(fsys, builder.ConfigPath, c); err != nil { cwd, osErr := os.Getwd() if osErr != nil { @@ -595,7 +612,11 @@ func (c *config) Load(path string, fsys fs.FS) error { } return errors.Errorf("cannot read config in %s: %w", cwd, err) } else if undecoded := metadata.Undecoded(); len(undecoded) > 0 { - fmt.Fprintf(os.Stderr, "Unknown config fields: %+v\n", undecoded) + for _, key := range undecoded { + if key[0] != "remotes" { + 
fmt.Fprintf(os.Stderr, "Unknown config field: [%s]\n", key) + } + } } // Load secrets from .env file if err := loadDefaultEnv(); err != nil { @@ -685,10 +706,32 @@ func (c *config) Load(path string, fsys fs.FS) error { } c.Functions[slug] = function } - return c.Validate() + if err := c.baseConfig.Validate(); err != nil { + return err + } + c.Remotes = make(map[string]baseConfig, len(c.Overrides)) + for name, remote := range c.Overrides { + base := c.baseConfig.Clone() + // Encode a toml file with only config overrides + var buf bytes.Buffer + if err := toml.NewEncoder(&buf).Encode(remote); err != nil { + return errors.Errorf("failed to encode map to TOML: %w", err) + } + // Decode overrides using base config as defaults + if metadata, err := toml.NewDecoder(&buf).Decode(&base); err != nil { + return errors.Errorf("failed to decode remote config: %w", err) + } else if undecoded := metadata.Undecoded(); len(undecoded) > 0 { + fmt.Fprintf(os.Stderr, "Unknown config fields: %+v\n", undecoded) + } + if err := base.Validate(); err != nil { + return err + } + c.Remotes[name] = base + } + return nil } -func (c *config) Validate() error { +func (c *baseConfig) Validate() error { if c.ProjectId == "" { return errors.New("Missing required field in config: project_id") } else if sanitized := sanitizeProjectId(c.ProjectId); sanitized != c.ProjectId { diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 29a605059..733b3c7b7 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -55,6 +55,41 @@ func TestConfigParsing(t *testing.T) { // Run test assert.Error(t, config.Load("", fsys)) }) + + t.Run("config file with remotes", func(t *testing.T) { + config := NewConfig() + // Setup in-memory fs + fsys := fs.MapFS{ + "supabase/config.toml": &fs.MapFile{Data: testInitConfigEmbed}, + "supabase/templates/invite.html": &fs.MapFile{}, + } + // Run test + t.Setenv("TWILIO_AUTH_TOKEN", "token") + t.Setenv("AZURE_CLIENT_ID", "hello") + t.Setenv("AZURE_SECRET", "this is cool") + t.Setenv("AUTH_SEND_SMS_SECRETS", "v1,whsec_aWxpa2VzdXBhYmFzZXZlcnltdWNoYW5kaWhvcGV5b3Vkb3Rvbw==") + t.Setenv("SENDGRID_API_KEY", "sendgrid") + assert.NoError(t, config.Load("", fsys)) + // Check the default value in the config + assert.Equal(t, "http://127.0.0.1:3000", config.Auth.SiteUrl) + assert.Equal(t, true, config.Auth.EnableSignup) + assert.Equal(t, true, config.Auth.External["azure"].Enabled) + assert.Equal(t, []string{"image/png", "image/jpeg"}, config.Storage.Buckets["images"].AllowedMimeTypes) + // Check the values for remotes override + production, ok := config.Remotes["production"] + assert.True(t, ok) + staging, ok := config.Remotes["staging"] + assert.True(t, ok) + // Check the values for production override + assert.Equal(t, config.ProjectId, production.ProjectId) + assert.Equal(t, "http://feature-auth-branch.com/", production.Auth.SiteUrl) + assert.Equal(t, false, production.Auth.EnableSignup) + assert.Equal(t, false, production.Auth.External["azure"].Enabled) + assert.Equal(t, "nope", production.Auth.External["azure"].ClientId) + // Check the values for the staging override + assert.Equal(t, "staging-project", staging.ProjectId) + assert.Equal(t, []string{"image/png"}, staging.Storage.Buckets["images"].AllowedMimeTypes) + }) } func TestFileSizeLimitConfigParsing(t *testing.T) { diff --git a/pkg/config/testdata/config.toml b/pkg/config/testdata/config.toml index f7061c1e7..a7aa36544 100644 --- a/pkg/config/testdata/config.toml +++ b/pkg/config/testdata/config.toml @@ -217,3 +217,17 @@ 
s3_region = "ap-southeast-1" s3_access_key = "" # Configures AWS_SECRET_ACCESS_KEY for S3 bucket s3_secret_key = "" + +[remotes.production.auth] +site_url = "http://feature-auth-branch.com/" +enable_signup = false + +[remotes.production.auth.external.azure] +enabled = false +client_id = "nope" + +[remotes.staging] +project_id = "staging-project" + +[remotes.staging.storage.buckets.images] +allowed_mime_types = ["image/png"] From 11c5004fcbe963aae9e7a48e3884018d179a1a95 Mon Sep 17 00:00:00 2001 From: Andrew Valleteau Date: Tue, 24 Sep 2024 12:05:22 +0200 Subject: [PATCH 02/28] fix: disable security opts for db test on bitbucket runner (#2705) hotfix(cli): disable security opts for db test on bitbucket runner --- internal/utils/docker.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/utils/docker.go b/internal/utils/docker.go index 80106583b..6264c4056 100644 --- a/internal/utils/docker.go +++ b/internal/utils/docker.go @@ -287,6 +287,9 @@ func DockerStart(ctx context.Context, config container.Config, hostConfig contai // Skip named volume for BitBucket pipeline if os.Getenv("BITBUCKET_CLONE_DIR") != "" { hostConfig.Binds = binds + // Bitbucket doesn't allow for --security-opt option to be set + // https://support.atlassian.com/bitbucket-cloud/docs/run-docker-commands-in-bitbucket-pipelines/#Full-list-of-restricted-commands + hostConfig.SecurityOpt = nil } else { // Create named volumes with labels for _, name := range sources { From 2ad2d330be6a0b8e74041f7bf725fdeff39a0358 Mon Sep 17 00:00:00 2001 From: Ivan Vasilov Date: Tue, 24 Sep 2024 14:49:29 +0200 Subject: [PATCH 03/28] fix: Bump studio to the latest image version 20240923 (#2706) Update studio version to 20240923 --- pkg/config/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/config/constants.go b/pkg/config/constants.go index 93331225a..d75ce61bc 100644 --- a/pkg/config/constants.go +++ b/pkg/config/constants.go @@ -10,7 +10,7 @@ const ( inbucketImage = "inbucket/inbucket:3.0.3" postgrestImage = "postgrest/postgrest:v12.2.0" pgmetaImage = "supabase/postgres-meta:v0.83.2" - studioImage = "supabase/studio:20240729-ce42139" + studioImage = "supabase/studio:20240923-2e3e90c" imageProxyImage = "darthsim/imgproxy:v3.8.0" edgeRuntimeImage = "supabase/edge-runtime:v1.58.3" vectorImage = "timberio/vector:0.28.1-alpine" From bb28463e6b75264dc97b2874a4b15d3561c23152 Mon Sep 17 00:00:00 2001 From: avallete Date: Tue, 24 Sep 2024 10:58:37 +0200 Subject: [PATCH 04/28] chore: use _supabase database for internals This is to avoid overloading our user postgres database with every new addition to _analytics or _realtime --- internal/db/dump/dump.go | 3 --- internal/db/reset/reset.go | 2 ++ internal/db/start/start.go | 2 +- internal/db/start/templates/schema.sql | 24 ++++++++++++++++-------- internal/start/start.go | 8 ++++---- 5 files changed, 23 insertions(+), 16 deletions(-) diff --git a/internal/db/dump/dump.go b/internal/db/dump/dump.go index a52b1dc41..e75cbb8b9 100644 --- a/internal/db/dump/dump.go +++ b/internal/db/dump/dump.go @@ -100,9 +100,6 @@ func dumpData(ctx context.Context, config pgconn.Config, schema, excludeTable [] // "storage", // "supabase_functions", "supabase_migrations", - "_analytics", - "_realtime", - "_supavisor", } var env []string if len(schema) > 0 { diff --git a/internal/db/reset/reset.go b/internal/db/reset/reset.go index 62c3d3350..89a33fda5 100644 --- a/internal/db/reset/reset.go +++ b/internal/db/reset/reset.go @@ -170,7 +170,9 @@ func recreateDatabase(ctx 
context.Context, options ...func(*pgx.ConnConfig)) err sql := migration.MigrationFile{ Statements: []string{ "DROP DATABASE IF EXISTS postgres WITH (FORCE)", + "DROP DATABASE IF EXISTS _supabase WITH (FORCE)", "CREATE DATABASE postgres WITH OWNER postgres", + "CREATE DATABASE _supabase WITH OWNER postgres", }, } return sql.ExecBatch(ctx, conn) diff --git a/internal/db/start/start.go b/internal/db/start/start.go index 3039b04d9..c25aa24e1 100644 --- a/internal/db/start/start.go +++ b/internal/db/start/start.go @@ -228,7 +228,7 @@ func initRealtimeJob(host string) utils.DockerJob { "DB_PORT=5432", "DB_USER=supabase_admin", "DB_PASSWORD=" + utils.Config.Db.Password, - "DB_NAME=postgres", + "DB_NAME=_supabase", "DB_AFTER_CONNECT_QUERY=SET search_path TO _realtime", "DB_ENC_KEY=" + utils.Config.Realtime.EncryptionKey, "API_JWT_SECRET=" + utils.Config.Auth.JwtSecret, diff --git a/internal/db/start/templates/schema.sql b/internal/db/start/templates/schema.sql index 0d0010231..418f0ee05 100644 --- a/internal/db/start/templates/schema.sql +++ b/internal/db/start/templates/schema.sql @@ -12,14 +12,22 @@ ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass'; ALTER USER supabase_replication_admin WITH PASSWORD :'pgpass'; ALTER USER supabase_read_only_user WITH PASSWORD :'pgpass'; -create schema if not exists _realtime; -alter schema _realtime owner to postgres; - -create schema if not exists _analytics; -alter schema _analytics owner to postgres; - -create schema if not exists _supavisor; -alter schema _supavisor owner to postgres; +CREATE DATABASE _supabase WITH OWNER postgres; +-- Connect to the _supabase database +\c _supabase +-- Create schemas in _supabase database for +-- internals tools and reports to not overload user database +-- with non-user activity +CREATE SCHEMA IF NOT EXISTS _realtime; +ALTER SCHEMA _realtime OWNER TO postgres; + +CREATE SCHEMA IF NOT EXISTS _analytics; +ALTER SCHEMA _analytics OWNER TO postgres; + +CREATE SCHEMA IF NOT EXISTS _supavisor; +ALTER SCHEMA _supavisor OWNER TO postgres; +-- Switch back to the main database +\c postgres BEGIN; diff --git a/internal/start/start.go b/internal/start/start.go index 2c3017ce5..413b8e2ed 100644 --- a/internal/start/start.go +++ b/internal/start/start.go @@ -195,7 +195,7 @@ func run(p utils.Program, ctx context.Context, fsys afero.Fs, excludedContainers // Start Logflare if utils.Config.Analytics.Enabled && !isContainerExcluded(utils.Config.Analytics.Image, excluded) { env := []string{ - "DB_DATABASE=" + dbConfig.Database, + "DB_DATABASE=_supabase", "DB_HOSTNAME=" + dbConfig.Host, fmt.Sprintf("DB_PORT=%d", dbConfig.Port), "DB_SCHEMA=_analytics", @@ -228,7 +228,7 @@ func run(p utils.Program, ctx context.Context, fsys afero.Fs, excludedContainers ) case config.LogflarePostgres: env = append(env, - fmt.Sprintf("POSTGRES_BACKEND_URL=postgresql://%s:%s@%s:%d/%s", dbConfig.User, dbConfig.Password, dbConfig.Host, dbConfig.Port, dbConfig.Database), + fmt.Sprintf("POSTGRES_BACKEND_URL=postgresql://%s:%s@%s:%d/%s", dbConfig.User, dbConfig.Password, dbConfig.Host, dbConfig.Port, "_supabase"), "POSTGRES_BACKEND_SCHEMA=_analytics", ) } @@ -750,7 +750,7 @@ EOF fmt.Sprintf("DB_PORT=%d", dbConfig.Port), "DB_USER=supabase_admin", "DB_PASSWORD=" + dbConfig.Password, - "DB_NAME=" + dbConfig.Database, + "DB_NAME=_supabase", "DB_AFTER_CONNECT_QUERY=SET search_path TO _realtime", "DB_ENC_KEY=" + utils.Config.Realtime.EncryptionKey, "API_JWT_SECRET=" + utils.Config.Auth.JwtSecret, @@ -1045,7 +1045,7 @@ EOF "PORT=4000", 
fmt.Sprintf("PROXY_PORT_SESSION=%d", portSession), fmt.Sprintf("PROXY_PORT_TRANSACTION=%d", portTransaction), - fmt.Sprintf("DATABASE_URL=ecto://%s:%s@%s:%d/%s", dbConfig.User, dbConfig.Password, dbConfig.Host, dbConfig.Port, dbConfig.Database), + fmt.Sprintf("DATABASE_URL=ecto://%s:%s@%s:%d/%s", dbConfig.User, dbConfig.Password, dbConfig.Host, dbConfig.Port, "_supabase"), "CLUSTER_POSTGRES=true", "SECRET_KEY_BASE=" + utils.Config.Db.Pooler.SecretKeyBase, "VAULT_ENC_KEY=" + utils.Config.Db.Pooler.EncryptionKey, From 5e946870f2127d6e4bbf55edc0b4252f08f12148 Mon Sep 17 00:00:00 2001 From: avallete Date: Tue, 24 Sep 2024 15:15:26 +0200 Subject: [PATCH 05/28] chore: fix tests mocks --- internal/db/reset/reset.go | 2 +- internal/db/reset/reset_test.go | 11 +++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/internal/db/reset/reset.go b/internal/db/reset/reset.go index 89a33fda5..d53392ae9 100644 --- a/internal/db/reset/reset.go +++ b/internal/db/reset/reset.go @@ -170,8 +170,8 @@ func recreateDatabase(ctx context.Context, options ...func(*pgx.ConnConfig)) err sql := migration.MigrationFile{ Statements: []string{ "DROP DATABASE IF EXISTS postgres WITH (FORCE)", - "DROP DATABASE IF EXISTS _supabase WITH (FORCE)", "CREATE DATABASE postgres WITH OWNER postgres", + "DROP DATABASE IF EXISTS _supabase WITH (FORCE)", "CREATE DATABASE _supabase WITH OWNER postgres", }, } diff --git a/internal/db/reset/reset_test.go b/internal/db/reset/reset_test.go index 03da968c5..ffb9898cf 100644 --- a/internal/db/reset/reset_test.go +++ b/internal/db/reset/reset_test.go @@ -145,6 +145,10 @@ func TestRecreateDatabase(t *testing.T) { Query("DROP DATABASE IF EXISTS postgres WITH (FORCE)"). Reply("DROP DATABASE"). Query("CREATE DATABASE postgres WITH OWNER postgres"). + Reply("CREATE DATABASE"). + Query("DROP DATABASE IF EXISTS _supabase WITH (FORCE)"). + Reply("DROP DATABASE"). + Query("CREATE DATABASE _supabase WITH OWNER postgres"). Reply("CREATE DATABASE") // Run test assert.NoError(t, recreateDatabase(context.Background(), conn.Intercept)) @@ -194,8 +198,11 @@ func TestRecreateDatabase(t *testing.T) { Reply("DO"). Query("DROP DATABASE IF EXISTS postgres WITH (FORCE)"). ReplyError(pgerrcode.ObjectInUse, `database "postgres" is used by an active logical replication slot`). - Query("CREATE DATABASE postgres WITH OWNER postgres") - // Run test + Query("CREATE DATABASE postgres WITH OWNER postgres"). + Query("DROP DATABASE IF EXISTS _supabase WITH (FORCE)"). + Reply("DROP DATABASE"). + Query("CREATE DATABASE _supabase WITH OWNER postgres"). 
+ Reply("CREATE DATABASE") err := recreateDatabase(context.Background(), conn.Intercept) // Check error assert.ErrorContains(t, err, `ERROR: database "postgres" is used by an active logical replication slot (SQLSTATE 55006)`) From e6788f3bc03e91af979c000435cf6ba4610c3dcf Mon Sep 17 00:00:00 2001 From: avallete Date: Tue, 24 Sep 2024 18:03:41 +0200 Subject: [PATCH 06/28] chore: add realtime schema as well --- internal/db/start/templates/schema.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/db/start/templates/schema.sql b/internal/db/start/templates/schema.sql index 418f0ee05..45a936a2e 100644 --- a/internal/db/start/templates/schema.sql +++ b/internal/db/start/templates/schema.sql @@ -20,6 +20,8 @@ CREATE DATABASE _supabase WITH OWNER postgres; -- with non-user activity CREATE SCHEMA IF NOT EXISTS _realtime; ALTER SCHEMA _realtime OWNER TO postgres; +CREATE SCHEMA IF NOT EXISTS realtime; +ALTER SCHEMA realtime OWNER TO postgres; CREATE SCHEMA IF NOT EXISTS _analytics; ALTER SCHEMA _analytics OWNER TO postgres; From 76ef3d2d3ed394302a05bb67194aa30a0136ce87 Mon Sep 17 00:00:00 2001 From: avallete Date: Tue, 24 Sep 2024 19:25:36 +0200 Subject: [PATCH 07/28] fix: make the start work for postgres 14 and 13 --- internal/db/dump/dump.go | 4 ++++ internal/db/start/start.go | 11 +++++++++-- internal/db/start/templates/_supabase.sql | 18 ++++++++++++++++++ internal/db/start/templates/schema.sql | 19 ------------------- 4 files changed, 31 insertions(+), 21 deletions(-) create mode 100644 internal/db/start/templates/_supabase.sql diff --git a/internal/db/dump/dump.go b/internal/db/dump/dump.go index e75cbb8b9..94e2ba7af 100644 --- a/internal/db/dump/dump.go +++ b/internal/db/dump/dump.go @@ -100,6 +100,10 @@ func dumpData(ctx context.Context, config pgconn.Config, schema, excludeTable [] // "storage", // "supabase_functions", "supabase_migrations", + // TODO: Remove in a few version in favor of _supabase internal db + "_analytics", + "_realtime", + "_supavisor", } var env []string if len(schema) > 0 { diff --git a/internal/db/start/start.go b/internal/db/start/start.go index c25aa24e1..8ed608ad7 100644 --- a/internal/db/start/start.go +++ b/internal/db/start/start.go @@ -29,6 +29,8 @@ var ( HealthTimeout = 120 * time.Second //go:embed templates/schema.sql initialSchema string + //go:embed templates/_supabase.sql + _supabaseSchema string ) func Run(ctx context.Context, fsys afero.Fs) error { @@ -82,7 +84,7 @@ func NewContainerConfig() container.Config { Retries: 3, }, Entrypoint: []string{"sh", "-c", `cat <<'EOF' > /etc/postgresql.schema.sql && cat <<'EOF' > /etc/postgresql-custom/pgsodium_root.key && docker-entrypoint.sh postgres -D /etc/postgresql -` + initialSchema + ` +` + initialSchema + "\n" + _supabaseSchema + "\n" + ` EOF ` + utils.Config.Db.RootKey + ` EOF @@ -122,7 +124,12 @@ func StartDatabase(ctx context.Context, fsys afero.Fs, w io.Writer, options ...f }, } if utils.Config.Db.MajorVersion <= 14 { - config.Entrypoint = nil + config.Entrypoint = []string{"sh", "-c", ` + cat <<'EOF' > /docker-entrypoint-initdb.d/supabase_schema.sql +` + _supabaseSchema + ` +EOF + docker-entrypoint.sh postgres -D /etc/postgresql + `} hostConfig.Tmpfs = map[string]string{"/docker-entrypoint-initdb.d": ""} } // Creating volume will not override existing volume, so we must inspect explicitly diff --git a/internal/db/start/templates/_supabase.sql b/internal/db/start/templates/_supabase.sql new file mode 100644 index 000000000..d391166a2 --- /dev/null +++ 
b/internal/db/start/templates/_supabase.sql @@ -0,0 +1,18 @@ +CREATE DATABASE _supabase WITH OWNER postgres; + +-- Switch to the newly created _supabase database +\c _supabase +-- Create schemas in _supabase database for +-- internals tools and reports to not overload user database +-- with non-user activity +CREATE SCHEMA IF NOT EXISTS _realtime; +ALTER SCHEMA _realtime OWNER TO postgres; +CREATE SCHEMA IF NOT EXISTS realtime; +ALTER SCHEMA realtime OWNER TO postgres; + +CREATE SCHEMA IF NOT EXISTS _analytics; +ALTER SCHEMA _analytics OWNER TO postgres; + +CREATE SCHEMA IF NOT EXISTS _supavisor; +ALTER SCHEMA _supavisor OWNER TO postgres; +\c postgres \ No newline at end of file diff --git a/internal/db/start/templates/schema.sql b/internal/db/start/templates/schema.sql index 45a936a2e..2ff6cf48f 100644 --- a/internal/db/start/templates/schema.sql +++ b/internal/db/start/templates/schema.sql @@ -12,25 +12,6 @@ ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass'; ALTER USER supabase_replication_admin WITH PASSWORD :'pgpass'; ALTER USER supabase_read_only_user WITH PASSWORD :'pgpass'; -CREATE DATABASE _supabase WITH OWNER postgres; --- Connect to the _supabase database -\c _supabase --- Create schemas in _supabase database for --- internals tools and reports to not overload user database --- with non-user activity -CREATE SCHEMA IF NOT EXISTS _realtime; -ALTER SCHEMA _realtime OWNER TO postgres; -CREATE SCHEMA IF NOT EXISTS realtime; -ALTER SCHEMA realtime OWNER TO postgres; - -CREATE SCHEMA IF NOT EXISTS _analytics; -ALTER SCHEMA _analytics OWNER TO postgres; - -CREATE SCHEMA IF NOT EXISTS _supavisor; -ALTER SCHEMA _supavisor OWNER TO postgres; --- Switch back to the main database -\c postgres - BEGIN; -- Create pg_net extension From fd04d07c60c8634326975b27b962bd729eaff182 Mon Sep 17 00:00:00 2001 From: Andrew Valleteau Date: Wed, 25 Sep 2024 08:28:05 +0200 Subject: [PATCH 08/28] feat: add custom seed path to config (#2702) * chore: replace SeedDataPath by DefaultSeedDataPath * wip: add path seed matching logic * chore: add test for utils.GetSeedFiles * chore: wip tests mock * chore: fix lint * chore: show seed path * chore: change comment * chore: apply pr suggestions * chore: fix lint * chore: keep default value assignation * chore: remove DefaultSeedPath * chore: keep consistent WARNING message * chore: inline get seed file path * chore: address review comments --------- Co-authored-by: Qiao Han --- cmd/db.go | 2 +- cmd/start.go | 2 +- internal/db/push/push.go | 6 ++- internal/db/push/push_test.go | 4 +- internal/db/reset/reset_test.go | 2 +- internal/db/start/start_test.go | 3 +- internal/init/init.go | 18 +------ internal/init/init_test.go | 13 ----- internal/migration/apply/apply.go | 8 +-- internal/migration/apply/apply_test.go | 10 ++-- internal/utils/misc.go | 22 +++++++- internal/utils/misc_test.go | 73 ++++++++++++++++++++++++++ pkg/config/config.go | 6 ++- pkg/config/templates/config.toml | 8 +++ pkg/config/testdata/config.toml | 8 +++ pkg/config/utils.go | 2 - pkg/migration/seed.go | 3 +- 17 files changed, 140 insertions(+), 50 deletions(-) diff --git a/cmd/db.go b/cmd/db.go index 2ddb4072f..2d64edd74 100644 --- a/cmd/db.go +++ b/cmd/db.go @@ -276,7 +276,7 @@ func init() { pushFlags := dbPushCmd.Flags() pushFlags.BoolVar(&includeAll, "include-all", false, "Include all migrations not found on remote history table.") pushFlags.BoolVar(&includeRoles, "include-roles", false, "Include custom roles from "+utils.CustomRolesPath+".") - pushFlags.BoolVar(&includeSeed, 
"include-seed", false, "Include seed data from "+utils.SeedDataPath+".") + pushFlags.BoolVar(&includeSeed, "include-seed", false, "Include seed data from your config.") pushFlags.BoolVar(&dryRun, "dry-run", false, "Print the migrations that would be applied, but don't actually apply them.") pushFlags.String("db-url", "", "Pushes to the database specified by the connection string (must be percent-encoded).") pushFlags.Bool("linked", true, "Pushes to the linked project.") diff --git a/cmd/start.go b/cmd/start.go index ae3faf01e..a7af80e0c 100644 --- a/cmd/start.go +++ b/cmd/start.go @@ -27,7 +27,7 @@ func validateExcludedContainers(excludedContainers []string) { // Sort the names list so it's easier to visually spot the one you looking for sort.Strings(validContainers) warning := fmt.Sprintf("%s The following container names are not valid to exclude: %s\nValid containers to exclude are: %s\n", - utils.Yellow("Warning:"), + utils.Yellow("WARNING:"), utils.Aqua(strings.Join(invalidContainers, ", ")), utils.Aqua(strings.Join(validContainers, ", "))) fmt.Fprint(os.Stderr, warning) diff --git a/internal/db/push/push.go b/internal/db/push/push.go index 68e3be9ac..0bff15163 100644 --- a/internal/db/push/push.go +++ b/internal/db/push/push.go @@ -41,7 +41,11 @@ func Run(ctx context.Context, dryRun, ignoreVersionMismatch bool, includeRoles, fmt.Fprintln(os.Stderr, "Would push these migrations:") fmt.Fprint(os.Stderr, utils.Bold(confirmPushAll(pending))) if includeSeed { - fmt.Fprintln(os.Stderr, "Would seed data "+utils.Bold(utils.SeedDataPath)+"...") + seedPaths, err := utils.GetSeedFiles(fsys) + if err != nil { + return err + } + fmt.Fprintf(os.Stderr, "Would seed data %v...\n", seedPaths) } } else { msg := fmt.Sprintf("Do you want to push these migrations to the remote database?\n%s\n", confirmPushAll(pending)) diff --git a/internal/db/push/push_test.go b/internal/db/push/push_test.go index e4f6353ec..72df15ef9 100644 --- a/internal/db/push/push_test.go +++ b/internal/db/push/push_test.go @@ -162,7 +162,9 @@ func TestPushAll(t *testing.T) { t.Run("throws error on seed failure", func(t *testing.T) { // Setup in-memory fs - fsys := &fstest.OpenErrorFs{DenyPath: utils.SeedDataPath} + seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") + fsys := &fstest.OpenErrorFs{DenyPath: seedPath} + _, _ = fsys.Create(seedPath) path := filepath.Join(utils.MigrationsDir, "0_test.sql") require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) // Setup mock postgres diff --git a/internal/db/reset/reset_test.go b/internal/db/reset/reset_test.go index 03da968c5..0be21f3b2 100644 --- a/internal/db/reset/reset_test.go +++ b/internal/db/reset/reset_test.go @@ -362,7 +362,7 @@ func TestResetRemote(t *testing.T) { fsys := afero.NewMemMapFs() path := filepath.Join(utils.MigrationsDir, "0_schema.sql") require.NoError(t, afero.WriteFile(fsys, path, nil, 0644)) - seedPath := filepath.Join(utils.SeedDataPath) + seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") // Will raise an error when seeding require.NoError(t, afero.WriteFile(fsys, seedPath, []byte("INSERT INTO test_table;"), 0644)) // Setup mock postgres diff --git a/internal/db/start/start_test.go b/internal/db/start/start_test.go index dcc93b4c8..e4072df98 100644 --- a/internal/db/start/start_test.go +++ b/internal/db/start/start_test.go @@ -6,6 +6,7 @@ import ( "io" "net/http" "os" + "path/filepath" "testing" "github.com/docker/docker/api/types" @@ -60,7 +61,7 @@ func TestStartDatabase(t *testing.T) { roles := "create role test" 
require.NoError(t, afero.WriteFile(fsys, utils.CustomRolesPath, []byte(roles), 0644)) seed := "INSERT INTO employees(name) VALUES ('Alice')" - require.NoError(t, afero.WriteFile(fsys, utils.SeedDataPath, []byte(seed), 0644)) + require.NoError(t, afero.WriteFile(fsys, filepath.Join(utils.SupabaseDirPath, "seed.sql"), []byte(seed), 0644)) // Setup mock docker require.NoError(t, apitest.MockDocker(utils.Docker)) defer gock.OffAll() diff --git a/internal/init/init.go b/internal/init/init.go index 033638dd9..f4e470b02 100644 --- a/internal/init/init.go +++ b/internal/init/init.go @@ -40,19 +40,14 @@ func Run(ctx context.Context, fsys afero.Fs, createVscodeSettings, createIntelli return err } - // 2. Create `seed.sql`. - if err := initSeed(fsys); err != nil { - return err - } - - // 3. Append to `.gitignore`. + // 2. Append to `.gitignore`. if utils.IsGitRepo() { if err := updateGitIgnore(utils.GitIgnorePath, fsys); err != nil { return err } } - // 4. Generate VS Code settings. + // 3. Generate VS Code settings. if createVscodeSettings != nil { if *createVscodeSettings { return writeVscodeConfig(fsys) @@ -77,15 +72,6 @@ func Run(ctx context.Context, fsys afero.Fs, createVscodeSettings, createIntelli return nil } -func initSeed(fsys afero.Fs) error { - f, err := fsys.OpenFile(utils.SeedDataPath, os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return errors.Errorf("failed to create seed file: %w", err) - } - defer f.Close() - return nil -} - func updateGitIgnore(ignorePath string, fsys afero.Fs) error { var contents []byte diff --git a/internal/init/init_test.go b/internal/init/init_test.go index 09bf6ab0f..47e35b89e 100644 --- a/internal/init/init_test.go +++ b/internal/init/init_test.go @@ -28,10 +28,6 @@ func TestInitCommand(t *testing.T) { exists, err = afero.Exists(fsys, utils.GitIgnorePath) assert.NoError(t, err) assert.True(t, exists) - // Validate generated seed.sql - exists, err = afero.Exists(fsys, utils.SeedDataPath) - assert.NoError(t, err) - assert.True(t, exists) // Validate vscode settings file isn't generated exists, err = afero.Exists(fsys, settingsPath) assert.NoError(t, err) @@ -70,15 +66,6 @@ func TestInitCommand(t *testing.T) { assert.Error(t, Run(context.Background(), fsys, nil, nil, utils.InitParams{})) }) - t.Run("throws error on seed failure", func(t *testing.T) { - // Setup in-memory fs - fsys := &fstest.OpenErrorFs{DenyPath: utils.SeedDataPath} - // Run test - err := Run(context.Background(), fsys, nil, nil, utils.InitParams{}) - // Check error - assert.ErrorIs(t, err, os.ErrPermission) - }) - t.Run("creates vscode settings file", func(t *testing.T) { // Setup in-memory fs fsys := &afero.MemMapFs{} diff --git a/internal/migration/apply/apply.go b/internal/migration/apply/apply.go index 44195297f..6796930d2 100644 --- a/internal/migration/apply/apply.go +++ b/internal/migration/apply/apply.go @@ -27,11 +27,11 @@ func MigrateAndSeed(ctx context.Context, version string, conn *pgx.Conn, fsys af } func SeedDatabase(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { - err := migration.SeedData(ctx, []string{utils.SeedDataPath}, conn, afero.NewIOFS(fsys)) - if errors.Is(err, os.ErrNotExist) { - return nil + seedPaths, err := utils.GetSeedFiles(fsys) + if err != nil { + return err } - return err + return migration.SeedData(ctx, seedPaths, conn, afero.NewIOFS(fsys)) } func CreateCustomRoles(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { diff --git a/internal/migration/apply/apply_test.go b/internal/migration/apply/apply_test.go index b8c041749..6c5b915fd 
100644 --- a/internal/migration/apply/apply_test.go +++ b/internal/migration/apply/apply_test.go @@ -44,7 +44,7 @@ func TestMigrateDatabase(t *testing.T) { path := filepath.Join(utils.MigrationsDir, "0_test.sql") sql := "create schema public" require.NoError(t, afero.WriteFile(fsys, path, []byte(sql), 0644)) - seedPath := filepath.Join(utils.SeedDataPath) + seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") // This will raise an error when seeding require.NoError(t, afero.WriteFile(fsys, seedPath, []byte("INSERT INTO test_table;"), 0644)) // Setup mock postgres @@ -82,7 +82,7 @@ func TestSeedDatabase(t *testing.T) { fsys := afero.NewMemMapFs() // Setup seed file sql := "INSERT INTO employees(name) VALUES ('Alice')" - require.NoError(t, afero.WriteFile(fsys, utils.SeedDataPath, []byte(sql), 0644)) + require.NoError(t, afero.WriteFile(fsys, filepath.Join(utils.SupabaseDirPath, "seed.sql"), []byte(sql), 0644)) // Setup mock postgres conn := pgtest.NewConn() defer conn.Close(t) @@ -100,7 +100,9 @@ func TestSeedDatabase(t *testing.T) { t.Run("throws error on read failure", func(t *testing.T) { // Setup in-memory fs - fsys := &fstest.OpenErrorFs{DenyPath: utils.SeedDataPath} + seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") + fsys := &fstest.OpenErrorFs{DenyPath: seedPath} + _, _ = fsys.Create(seedPath) // Run test err := SeedDatabase(context.Background(), nil, fsys) // Check error @@ -112,7 +114,7 @@ func TestSeedDatabase(t *testing.T) { fsys := afero.NewMemMapFs() // Setup seed file sql := "INSERT INTO employees(name) VALUES ('Alice')" - require.NoError(t, afero.WriteFile(fsys, utils.SeedDataPath, []byte(sql), 0644)) + require.NoError(t, afero.WriteFile(fsys, filepath.Join(utils.SupabaseDirPath, "seed.sql"), []byte(sql), 0644)) // Setup mock postgres conn := pgtest.NewConn() defer conn.Close(t) diff --git a/internal/utils/misc.go b/internal/utils/misc.go index 8118b8213..d4e49f046 100644 --- a/internal/utils/misc.go +++ b/internal/utils/misc.go @@ -8,6 +8,7 @@ import ( "os" "path/filepath" "regexp" + "sort" "time" "github.com/docker/docker/client" @@ -148,7 +149,6 @@ var ( FallbackImportMapPath = filepath.Join(FunctionsDir, "import_map.json") FallbackEnvFilePath = filepath.Join(FunctionsDir, ".env") DbTestsDir = filepath.Join(SupabaseDirPath, "tests") - SeedDataPath = filepath.Join(SupabaseDirPath, "seed.sql") CustomRolesPath = filepath.Join(SupabaseDirPath, "roles.sql") ErrNotLinked = errors.Errorf("Cannot find project ref. Have you run %s?", Aqua("supabase link")) @@ -157,6 +157,26 @@ var ( ErrNotRunning = errors.Errorf("%s is not running.", Aqua("supabase start")) ) +// Match the glob patterns from the config to get a deduplicated +// array of all migrations files to apply in the declared order. +func GetSeedFiles(fsys afero.Fs) ([]string, error) { + seedPaths := Config.Db.Seed.SqlPaths + var files []string + for _, pattern := range seedPaths { + fullPattern := filepath.Join(SupabaseDirPath, pattern) + matches, err := afero.Glob(fsys, fullPattern) + if err != nil { + return nil, errors.Errorf("failed to apply glob pattern for %w", err) + } + if len(matches) == 0 { + fmt.Fprintf(os.Stderr, "%s Your pattern %s matched 0 seed files.\n", Yellow("WARNING:"), pattern) + } + sort.Strings(matches) + files = append(files, matches...) + } + return RemoveDuplicates(files), nil +} + func GetCurrentTimestamp() string { // Magic number: https://stackoverflow.com/q/45160822. 
return time.Now().UTC().Format("20060102150405") diff --git a/internal/utils/misc_test.go b/internal/utils/misc_test.go index 6472c2145..c16abdf00 100644 --- a/internal/utils/misc_test.go +++ b/internal/utils/misc_test.go @@ -75,3 +75,76 @@ func TestProjectRoot(t *testing.T) { assert.Equal(t, cwd, path) }) } + +func TestGetSeedFiles(t *testing.T) { + t.Run("returns seed files matching patterns", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Create seed files + require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed1.sql", []byte("INSERT INTO table1 VALUES (1);"), 0644)) + require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed2.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) + require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed3.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) + require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/another.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) + require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/ignore.sql", []byte("INSERT INTO table3 VALUES (3);"), 0644)) + // Mock config patterns + Config.Db.Seed.SqlPaths = []string{"seeds/seed[12].sql", "seeds/ano*.sql"} + + // Run test + files, err := GetSeedFiles(fsys) + + // Check error + assert.NoError(t, err) + // Validate files + assert.ElementsMatch(t, []string{"supabase/seeds/seed1.sql", "supabase/seeds/seed2.sql", "supabase/seeds/another.sql"}, files) + }) + t.Run("returns seed files matching patterns skip duplicates", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Create seed files + require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed1.sql", []byte("INSERT INTO table1 VALUES (1);"), 0644)) + require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed2.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) + require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed3.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) + require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/another.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) + require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/ignore.sql", []byte("INSERT INTO table3 VALUES (3);"), 0644)) + // Mock config patterns + Config.Db.Seed.SqlPaths = []string{"seeds/seed[12].sql", "seeds/ano*.sql", "seeds/seed*.sql"} + + // Run test + files, err := GetSeedFiles(fsys) + + // Check error + assert.NoError(t, err) + // Validate files + assert.ElementsMatch(t, []string{"supabase/seeds/seed1.sql", "supabase/seeds/seed2.sql", "supabase/seeds/another.sql", "supabase/seeds/seed3.sql"}, files) + }) + + t.Run("returns error on invalid pattern", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Mock config patterns + Config.Db.Seed.SqlPaths = []string{"[*!#@D#"} + + // Run test + files, err := GetSeedFiles(fsys) + + // Check error + assert.Nil(t, err) + // The resuling seed list should be empty + assert.ElementsMatch(t, []string{}, files) + }) + + t.Run("returns empty list if no files match", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Mock config patterns + Config.Db.Seed.SqlPaths = []string{"seeds/*.sql"} + + // Run test + files, err := GetSeedFiles(fsys) + + // Check error + assert.NoError(t, err) + // Validate files + assert.Empty(t, files) + }) +} diff --git a/pkg/config/config.go b/pkg/config/config.go index 18414546a..3636e7e9c 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -172,7 +172,8 @@ type ( } seed struct { - Enabled bool 
`toml:"enabled"` + Enabled bool `toml:"enabled"` + SqlPaths []string `toml:"sql_paths"` } pooler struct { @@ -482,7 +483,8 @@ func NewConfig(editors ...ConfigEditor) config { SecretKeyBase: "EAx3IQ/wRG1v47ZD4NE4/9RzBI8Jmil3x0yhcW4V2NHBP6c2iPIzwjofi2Ep4HIG", }, Seed: seed{ - Enabled: true, + Enabled: true, + SqlPaths: []string{"./seed.sql"}, }, }, Realtime: realtime{ diff --git a/pkg/config/templates/config.toml b/pkg/config/templates/config.toml index 37646aa63..112237748 100644 --- a/pkg/config/templates/config.toml +++ b/pkg/config/templates/config.toml @@ -39,6 +39,14 @@ default_pool_size = 20 # Maximum number of client connections allowed. max_client_conn = 100 +[db.seed] +# If enabled, seeds the database after migrations during a db reset. +enabled = true +# Specifies an ordered list of seed files to load during db reset. +# Supports glob patterns relative to supabase directory. For example: +# sql_paths = ['./seeds/*.sql', '../project-src/seeds/*-load-testing.sql'] +sql_paths = ['./seed.sql'] + [realtime] enabled = true # Bind realtime via either IPv4 or IPv6. (default: IPv4) diff --git a/pkg/config/testdata/config.toml b/pkg/config/testdata/config.toml index a7aa36544..76e070c60 100644 --- a/pkg/config/testdata/config.toml +++ b/pkg/config/testdata/config.toml @@ -39,6 +39,14 @@ default_pool_size = 20 # Maximum number of client connections allowed. max_client_conn = 100 +[db.seed] +# If enabled, seeds the database after migrations during a db reset. +enabled = true +# Specifies an ordered list of seed files to load during db reset. +# Supports glob patterns relative to supabase directory. For example: +# sql_paths = ['./seeds/*.sql', '../project-src/seeds/*-load-testing.sql'] +sql_paths = ['./seed.sql'] + [realtime] enabled = true # Bind realtime via either IPv4 or IPv6. 
(default: IPv6) diff --git a/pkg/config/utils.go b/pkg/config/utils.go index b2318b5a9..ac26a38d2 100644 --- a/pkg/config/utils.go +++ b/pkg/config/utils.go @@ -29,7 +29,6 @@ type pathBuilder struct { FallbackImportMapPath string FallbackEnvFilePath string DbTestsDir string - SeedDataPath string CustomRolesPath string } @@ -63,7 +62,6 @@ func NewPathBuilder(configPath string) pathBuilder { FallbackImportMapPath: filepath.Join(base, "functions", "import_map.json"), FallbackEnvFilePath: filepath.Join(base, "functions", ".env"), DbTestsDir: filepath.Join(base, "tests"), - SeedDataPath: filepath.Join(base, "seed.sql"), CustomRolesPath: filepath.Join(base, "roles.sql"), } } diff --git a/pkg/migration/seed.go b/pkg/migration/seed.go index f299e9f05..62a2c875b 100644 --- a/pkg/migration/seed.go +++ b/pkg/migration/seed.go @@ -12,8 +12,7 @@ import ( func SeedData(ctx context.Context, pending []string, conn *pgx.Conn, fsys fs.FS) error { for _, path := range pending { - filename := filepath.Base(path) - fmt.Fprintf(os.Stderr, "Seeding data from %s...\n", filename) + fmt.Fprintf(os.Stderr, "Seeding data from %s...\n", path) // Batch seed commands, safe to use statement cache if seed, err := NewMigrationFromFile(path, fsys); err != nil { return err From 371a163cc6bafc72fa5a3eca3718a6c3a1d063a8 Mon Sep 17 00:00:00 2001 From: avallete Date: Wed, 25 Sep 2024 09:57:59 +0200 Subject: [PATCH 09/28] chore: add bitbucket build canary badge --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 63ca688be..3229bc369 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Supabase CLI (v1) -[![Coverage Status](https://coveralls.io/repos/github/supabase/cli/badge.svg?branch=main)](https://coveralls.io/github/supabase/cli?branch=main) +[![Coverage Status](https://coveralls.io/repos/github/supabase/cli/badge.svg?branch=main)](https://coveralls.io/github/supabase/cli?branch=main) [![Bitbucket Pipelines](https://img.shields.io/bitbucket/pipelines/supabase-cli/setup-cli/master?style=flat-square&label=Bitbucket%20Canary)](https://bitbucket.org/supabase-cli/setup-cli/pipelines) [Supabase](https://supabase.io) is an open source Firebase alternative. We're building the features of Firebase using enterprise-grade open source tools. 
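
Not part of these patches — a minimal, hypothetical sketch of how a caller of pkg/config might read the Remotes map introduced in PATCH 01. The standalone main, the module import path (github.com/supabase/cli/pkg/config), and the assumption that the embedded template defaults make a project_id-only config valid to load are illustrative only; the method and field names (NewConfig, Load, Remotes, ProjectId, Auth.SiteUrl) mirror the diff and its test above.

    package main

    import (
        "fmt"
        "log"
        fs "testing/fstest"

        "github.com/supabase/cli/pkg/config" // import path assumed
    )

    func main() {
        // Minimal project config with one branch-specific override,
        // modelled on pkg/config/testdata/config.toml from PATCH 01.
        userConfig := []byte(`
    project_id = "my-project"

    [remotes.staging]
    project_id = "staging-project"

    [remotes.staging.auth]
    site_url = "http://staging.example.com"
    `)
        fsys := fs.MapFS{"supabase/config.toml": &fs.MapFile{Data: userConfig}}

        cfg := config.NewConfig()
        if err := cfg.Load("", fsys); err != nil {
            log.Fatalln(err)
        }
        // Base values stay untouched; cfg.Remotes holds the merged override.
        fmt.Println(cfg.ProjectId) // my-project
        if staging, ok := cfg.Remotes["staging"]; ok {
            fmt.Println(staging.ProjectId)    // staging-project
            fmt.Println(staging.Auth.SiteUrl) // http://staging.example.com
        }
    }

Per the Load changes in PATCH 01, each remote is decoded on top of a clone of the base config, so any field not set under [remotes.<name>] falls back to the base value.
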
From d5a34fc78536fa0d1cb8f21f2d35476440dfd6d3 Mon Sep 17 00:00:00 2001 From: avallete Date: Wed, 25 Sep 2024 10:12:45 +0200 Subject: [PATCH 10/28] chore: add gitlab canary badge --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 3229bc369..750bd8895 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # Supabase CLI (v1) -[![Coverage Status](https://coveralls.io/repos/github/supabase/cli/badge.svg?branch=main)](https://coveralls.io/github/supabase/cli?branch=main) [![Bitbucket Pipelines](https://img.shields.io/bitbucket/pipelines/supabase-cli/setup-cli/master?style=flat-square&label=Bitbucket%20Canary)](https://bitbucket.org/supabase-cli/setup-cli/pipelines) +[![Coverage Status](https://coveralls.io/repos/github/supabase/cli/badge.svg?branch=main)](https://coveralls.io/github/supabase/cli?branch=main) [![Bitbucket Pipelines](https://img.shields.io/bitbucket/pipelines/supabase-cli/setup-cli/master?style=flat-square&label=Bitbucket%20Canary)](https://bitbucket.org/supabase-cli/setup-cli/pipelines) [![Gitlab Pipeline Status](https://img.shields.io/gitlab/pipeline-status/sweatybridge%2Fsetup-cli?label=Gitlab%20Canary) +](https://gitlab.com/sweatybridge/setup-cli/-/pipelines) [Supabase](https://supabase.io) is an open source Firebase alternative. We're building the features of Firebase using enterprise-grade open source tools. From 1623aa9b95ec90e21c5bae5a0d50dcf272abe92f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filipe=20Caba=C3=A7o?= Date: Thu, 26 Sep 2024 02:02:07 +0100 Subject: [PATCH 11/28] fix: Bump up studio image (#2711) From ccf214d252a76ea87e5b3d72ed61c66c59ef7a48 Mon Sep 17 00:00:00 2001 From: avallete Date: Fri, 27 Sep 2024 13:53:47 +0200 Subject: [PATCH 12/28] chore: restore realtime to postgres database --- internal/db/start/templates/_supabase.sql | 5 ----- internal/db/start/templates/schema.sql | 3 +++ internal/start/start.go | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/internal/db/start/templates/_supabase.sql b/internal/db/start/templates/_supabase.sql index d391166a2..c339d2b61 100644 --- a/internal/db/start/templates/_supabase.sql +++ b/internal/db/start/templates/_supabase.sql @@ -5,11 +5,6 @@ CREATE DATABASE _supabase WITH OWNER postgres; -- Create schemas in _supabase database for -- internals tools and reports to not overload user database -- with non-user activity -CREATE SCHEMA IF NOT EXISTS _realtime; -ALTER SCHEMA _realtime OWNER TO postgres; -CREATE SCHEMA IF NOT EXISTS realtime; -ALTER SCHEMA realtime OWNER TO postgres; - CREATE SCHEMA IF NOT EXISTS _analytics; ALTER SCHEMA _analytics OWNER TO postgres; diff --git a/internal/db/start/templates/schema.sql b/internal/db/start/templates/schema.sql index 2ff6cf48f..534dd1207 100644 --- a/internal/db/start/templates/schema.sql +++ b/internal/db/start/templates/schema.sql @@ -12,6 +12,9 @@ ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass'; ALTER USER supabase_replication_admin WITH PASSWORD :'pgpass'; ALTER USER supabase_read_only_user WITH PASSWORD :'pgpass'; +create schema if not exists _realtime; +alter schema _realtime owner to postgres; + BEGIN; -- Create pg_net extension diff --git a/internal/start/start.go b/internal/start/start.go index 413b8e2ed..72f34b8e7 100644 --- a/internal/start/start.go +++ b/internal/start/start.go @@ -750,7 +750,7 @@ EOF fmt.Sprintf("DB_PORT=%d", dbConfig.Port), "DB_USER=supabase_admin", "DB_PASSWORD=" + dbConfig.Password, - "DB_NAME=_supabase", + "DB_NAME=" + dbConfig.Database, 
"DB_AFTER_CONNECT_QUERY=SET search_path TO _realtime", "DB_ENC_KEY=" + utils.Config.Realtime.EncryptionKey, "API_JWT_SECRET=" + utils.Config.Auth.JwtSecret, From e911718c22eecae9b039119c289b766de58fe8fa Mon Sep 17 00:00:00 2001 From: avallete Date: Fri, 27 Sep 2024 16:06:33 +0200 Subject: [PATCH 13/28] chore: remove relatime migration --- internal/db/start/start.go | 2 +- internal/db/start/templates/_supabase.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/db/start/start.go b/internal/db/start/start.go index 8ed608ad7..9614528f5 100644 --- a/internal/db/start/start.go +++ b/internal/db/start/start.go @@ -235,7 +235,7 @@ func initRealtimeJob(host string) utils.DockerJob { "DB_PORT=5432", "DB_USER=supabase_admin", "DB_PASSWORD=" + utils.Config.Db.Password, - "DB_NAME=_supabase", + "DB_NAME=postgres", "DB_AFTER_CONNECT_QUERY=SET search_path TO _realtime", "DB_ENC_KEY=" + utils.Config.Realtime.EncryptionKey, "API_JWT_SECRET=" + utils.Config.Auth.JwtSecret, diff --git a/internal/db/start/templates/_supabase.sql b/internal/db/start/templates/_supabase.sql index c339d2b61..6e5d8487b 100644 --- a/internal/db/start/templates/_supabase.sql +++ b/internal/db/start/templates/_supabase.sql @@ -10,4 +10,4 @@ ALTER SCHEMA _analytics OWNER TO postgres; CREATE SCHEMA IF NOT EXISTS _supavisor; ALTER SCHEMA _supavisor OWNER TO postgres; -\c postgres \ No newline at end of file +\c postgres From 3115143937d6ed665bc547e66def7f96032b8c81 Mon Sep 17 00:00:00 2001 From: Andrew Valleteau Date: Fri, 27 Sep 2024 18:10:42 +0200 Subject: [PATCH 14/28] Update internal/db/start/start.go Co-authored-by: Han Qiao --- internal/db/start/start.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/db/start/start.go b/internal/db/start/start.go index 9614528f5..74e40554a 100644 --- a/internal/db/start/start.go +++ b/internal/db/start/start.go @@ -84,7 +84,8 @@ func NewContainerConfig() container.Config { Retries: 3, }, Entrypoint: []string{"sh", "-c", `cat <<'EOF' > /etc/postgresql.schema.sql && cat <<'EOF' > /etc/postgresql-custom/pgsodium_root.key && docker-entrypoint.sh postgres -D /etc/postgresql -` + initialSchema + "\n" + _supabaseSchema + "\n" + ` +` + initialSchema + ` +` + _supabaseSchema + ` EOF ` + utils.Config.Db.RootKey + ` EOF From c8e553faacdfc90d4d6f04a4c05890d60a1dab21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EB=83=A5=EB=83=90=EC=B1=A0?= Date: Tue, 1 Oct 2024 13:22:30 +0900 Subject: [PATCH 15/28] fix: bump edge-runtime to 1.58.11 (#2718) --- pkg/config/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/config/constants.go b/pkg/config/constants.go index d75ce61bc..9d91a8489 100644 --- a/pkg/config/constants.go +++ b/pkg/config/constants.go @@ -12,7 +12,7 @@ const ( pgmetaImage = "supabase/postgres-meta:v0.83.2" studioImage = "supabase/studio:20240923-2e3e90c" imageProxyImage = "darthsim/imgproxy:v3.8.0" - edgeRuntimeImage = "supabase/edge-runtime:v1.58.3" + edgeRuntimeImage = "supabase/edge-runtime:v1.58.11" vectorImage = "timberio/vector:0.28.1-alpine" supavisorImage = "supabase/supavisor:1.1.56" gotrueImage = "supabase/gotrue:v2.158.1" From 4ee741f5216b99a69461fb5185f1b248cff06cb3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 22:35:49 +0000 Subject: [PATCH 16/28] chore(deps): bump github.com/containers/common from 0.60.2 to 0.60.4 (#2722) Bumps [github.com/containers/common](https://github.com/containers/common) from 0.60.2 to 0.60.4. 
- [Release notes](https://github.com/containers/common/releases) - [Commits](https://github.com/containers/common/compare/v0.60.2...v0.60.4) --- updated-dependencies: - dependency-name: github.com/containers/common dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1d790e827..aff55f8eb 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/charmbracelet/bubbletea v0.25.0 github.com/charmbracelet/glamour v0.7.0 github.com/charmbracelet/lipgloss v0.12.1 - github.com/containers/common v0.60.2 + github.com/containers/common v0.60.4 github.com/danieljoos/wincred v1.2.1 github.com/deepmap/oapi-codegen/v2 v2.2.0 github.com/docker/cli v27.2.1+incompatible diff --git a/go.sum b/go.sum index 4fdaab3fb..43131167a 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,8 @@ github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2 github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containers/common v0.60.2 h1:utcwp2YkO8c0mNlwRxsxfOiqfj157FRrBjxgjR6f+7o= -github.com/containers/common v0.60.2/go.mod h1:I0upBi1qJX3QmzGbUOBN1LVP6RvkKhd3qQpZbQT+Q54= +github.com/containers/common v0.60.4 h1:H5+LAMHPZEqX6vVNOQ+IguVsaFl8kbO/SZ/VPXjxhy0= +github.com/containers/common v0.60.4/go.mod h1:I0upBi1qJX3QmzGbUOBN1LVP6RvkKhd3qQpZbQT+Q54= github.com/containers/storage v1.55.0 h1:wTWZ3YpcQf1F+dSP4KxG9iqDfpQY1otaUXjPpffuhgg= github.com/containers/storage v1.55.0/go.mod h1:28cB81IDk+y7ok60Of6u52RbCeBRucbFOeLunhER1RQ= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= From 3bb06fb7d4f3ca94c6f00f967e2d4082698b6ca5 Mon Sep 17 00:00:00 2001 From: Han Qiao Date: Fri, 4 Oct 2024 16:47:08 +0800 Subject: [PATCH 17/28] chore: glob seed paths when loading config (#2726) --- internal/db/push/push.go | 6 +- internal/db/push/push_test.go | 3 +- internal/db/start/start_test.go | 4 +- internal/migration/apply/apply.go | 6 +- internal/migration/apply/apply_test.go | 18 ++++-- internal/utils/misc.go | 21 ------ internal/utils/misc_test.go | 73 --------------------- pkg/config/config.go | 47 ++++++++++++-- pkg/config/config_test.go | 88 ++++++++++++++++++++++++++ 9 files changed, 152 insertions(+), 114 deletions(-) diff --git a/internal/db/push/push.go b/internal/db/push/push.go index 0bff15163..5439f16cc 100644 --- a/internal/db/push/push.go +++ b/internal/db/push/push.go @@ -41,11 +41,7 @@ func Run(ctx context.Context, dryRun, ignoreVersionMismatch bool, includeRoles, fmt.Fprintln(os.Stderr, "Would push these migrations:") fmt.Fprint(os.Stderr, utils.Bold(confirmPushAll(pending))) if includeSeed { - seedPaths, err := utils.GetSeedFiles(fsys) - if err != nil { - return err - } - fmt.Fprintf(os.Stderr, "Would seed data %v...\n", seedPaths) + fmt.Fprintf(os.Stderr, "Would seed data %v...\n", utils.Config.Db.Seed.SqlPaths) } } else { msg := fmt.Sprintf("Do you want to push these migrations to the remote database?\n%s\n", confirmPushAll(pending)) diff --git a/internal/db/push/push_test.go b/internal/db/push/push_test.go index 72df15ef9..1616216d9 100644 --- 
a/internal/db/push/push_test.go +++ b/internal/db/push/push_test.go @@ -161,8 +161,9 @@ func TestPushAll(t *testing.T) { }) t.Run("throws error on seed failure", func(t *testing.T) { - // Setup in-memory fs seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") + utils.Config.Db.Seed.SqlPaths = []string{seedPath} + // Setup in-memory fs fsys := &fstest.OpenErrorFs{DenyPath: seedPath} _, _ = fsys.Create(seedPath) path := filepath.Join(utils.MigrationsDir, "0_test.sql") diff --git a/internal/db/start/start_test.go b/internal/db/start/start_test.go index e4072df98..555141192 100644 --- a/internal/db/start/start_test.go +++ b/internal/db/start/start_test.go @@ -52,6 +52,8 @@ func TestInitBranch(t *testing.T) { func TestStartDatabase(t *testing.T) { t.Run("initialize main branch", func(t *testing.T) { + seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") + utils.Config.Db.Seed.SqlPaths = []string{seedPath} utils.Config.Db.MajorVersion = 15 utils.DbId = "supabase_db_test" utils.ConfigId = "supabase_config_test" @@ -61,7 +63,7 @@ func TestStartDatabase(t *testing.T) { roles := "create role test" require.NoError(t, afero.WriteFile(fsys, utils.CustomRolesPath, []byte(roles), 0644)) seed := "INSERT INTO employees(name) VALUES ('Alice')" - require.NoError(t, afero.WriteFile(fsys, filepath.Join(utils.SupabaseDirPath, "seed.sql"), []byte(seed), 0644)) + require.NoError(t, afero.WriteFile(fsys, seedPath, []byte(seed), 0644)) // Setup mock docker require.NoError(t, apitest.MockDocker(utils.Docker)) defer gock.OffAll() diff --git a/internal/migration/apply/apply.go b/internal/migration/apply/apply.go index 6796930d2..9ead81dd5 100644 --- a/internal/migration/apply/apply.go +++ b/internal/migration/apply/apply.go @@ -27,11 +27,7 @@ func MigrateAndSeed(ctx context.Context, version string, conn *pgx.Conn, fsys af } func SeedDatabase(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { - seedPaths, err := utils.GetSeedFiles(fsys) - if err != nil { - return err - } - return migration.SeedData(ctx, seedPaths, conn, afero.NewIOFS(fsys)) + return migration.SeedData(ctx, utils.Config.Db.Seed.SqlPaths, conn, afero.NewIOFS(fsys)) } func CreateCustomRoles(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { diff --git a/internal/migration/apply/apply_test.go b/internal/migration/apply/apply_test.go index 6c5b915fd..25362c5fe 100644 --- a/internal/migration/apply/apply_test.go +++ b/internal/migration/apply/apply_test.go @@ -77,12 +77,15 @@ func TestMigrateDatabase(t *testing.T) { } func TestSeedDatabase(t *testing.T) { + seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") + utils.Config.Db.Seed.SqlPaths = []string{seedPath} + t.Run("seeds from file", func(t *testing.T) { // Setup in-memory fs fsys := afero.NewMemMapFs() // Setup seed file sql := "INSERT INTO employees(name) VALUES ('Alice')" - require.NoError(t, afero.WriteFile(fsys, filepath.Join(utils.SupabaseDirPath, "seed.sql"), []byte(sql), 0644)) + require.NoError(t, afero.WriteFile(fsys, seedPath, []byte(sql), 0644)) // Setup mock postgres conn := pgtest.NewConn() defer conn.Close(t) @@ -95,12 +98,19 @@ func TestSeedDatabase(t *testing.T) { }) t.Run("ignores missing seed", func(t *testing.T) { - assert.NoError(t, SeedDatabase(context.Background(), nil, afero.NewMemMapFs())) + sqlPaths := utils.Config.Db.Seed.SqlPaths + utils.Config.Db.Seed.SqlPaths = []string{} + t.Cleanup(func() { utils.Config.Db.Seed.SqlPaths = sqlPaths }) + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := 
SeedDatabase(context.Background(), nil, fsys) + // Check error + assert.NoError(t, err) }) t.Run("throws error on read failure", func(t *testing.T) { // Setup in-memory fs - seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") fsys := &fstest.OpenErrorFs{DenyPath: seedPath} _, _ = fsys.Create(seedPath) // Run test @@ -114,7 +124,7 @@ func TestSeedDatabase(t *testing.T) { fsys := afero.NewMemMapFs() // Setup seed file sql := "INSERT INTO employees(name) VALUES ('Alice')" - require.NoError(t, afero.WriteFile(fsys, filepath.Join(utils.SupabaseDirPath, "seed.sql"), []byte(sql), 0644)) + require.NoError(t, afero.WriteFile(fsys, seedPath, []byte(sql), 0644)) // Setup mock postgres conn := pgtest.NewConn() defer conn.Close(t) diff --git a/internal/utils/misc.go b/internal/utils/misc.go index d4e49f046..adb0efa9c 100644 --- a/internal/utils/misc.go +++ b/internal/utils/misc.go @@ -8,7 +8,6 @@ import ( "os" "path/filepath" "regexp" - "sort" "time" "github.com/docker/docker/client" @@ -157,26 +156,6 @@ var ( ErrNotRunning = errors.Errorf("%s is not running.", Aqua("supabase start")) ) -// Match the glob patterns from the config to get a deduplicated -// array of all migrations files to apply in the declared order. -func GetSeedFiles(fsys afero.Fs) ([]string, error) { - seedPaths := Config.Db.Seed.SqlPaths - var files []string - for _, pattern := range seedPaths { - fullPattern := filepath.Join(SupabaseDirPath, pattern) - matches, err := afero.Glob(fsys, fullPattern) - if err != nil { - return nil, errors.Errorf("failed to apply glob pattern for %w", err) - } - if len(matches) == 0 { - fmt.Fprintf(os.Stderr, "%s Your pattern %s matched 0 seed files.\n", Yellow("WARNING:"), pattern) - } - sort.Strings(matches) - files = append(files, matches...) - } - return RemoveDuplicates(files), nil -} - func GetCurrentTimestamp() string { // Magic number: https://stackoverflow.com/q/45160822. 
return time.Now().UTC().Format("20060102150405") diff --git a/internal/utils/misc_test.go b/internal/utils/misc_test.go index c16abdf00..6472c2145 100644 --- a/internal/utils/misc_test.go +++ b/internal/utils/misc_test.go @@ -75,76 +75,3 @@ func TestProjectRoot(t *testing.T) { assert.Equal(t, cwd, path) }) } - -func TestGetSeedFiles(t *testing.T) { - t.Run("returns seed files matching patterns", func(t *testing.T) { - // Setup in-memory fs - fsys := afero.NewMemMapFs() - // Create seed files - require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed1.sql", []byte("INSERT INTO table1 VALUES (1);"), 0644)) - require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed2.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) - require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed3.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) - require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/another.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) - require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/ignore.sql", []byte("INSERT INTO table3 VALUES (3);"), 0644)) - // Mock config patterns - Config.Db.Seed.SqlPaths = []string{"seeds/seed[12].sql", "seeds/ano*.sql"} - - // Run test - files, err := GetSeedFiles(fsys) - - // Check error - assert.NoError(t, err) - // Validate files - assert.ElementsMatch(t, []string{"supabase/seeds/seed1.sql", "supabase/seeds/seed2.sql", "supabase/seeds/another.sql"}, files) - }) - t.Run("returns seed files matching patterns skip duplicates", func(t *testing.T) { - // Setup in-memory fs - fsys := afero.NewMemMapFs() - // Create seed files - require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed1.sql", []byte("INSERT INTO table1 VALUES (1);"), 0644)) - require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed2.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) - require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/seed3.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) - require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/another.sql", []byte("INSERT INTO table2 VALUES (2);"), 0644)) - require.NoError(t, afero.WriteFile(fsys, "supabase/seeds/ignore.sql", []byte("INSERT INTO table3 VALUES (3);"), 0644)) - // Mock config patterns - Config.Db.Seed.SqlPaths = []string{"seeds/seed[12].sql", "seeds/ano*.sql", "seeds/seed*.sql"} - - // Run test - files, err := GetSeedFiles(fsys) - - // Check error - assert.NoError(t, err) - // Validate files - assert.ElementsMatch(t, []string{"supabase/seeds/seed1.sql", "supabase/seeds/seed2.sql", "supabase/seeds/another.sql", "supabase/seeds/seed3.sql"}, files) - }) - - t.Run("returns error on invalid pattern", func(t *testing.T) { - // Setup in-memory fs - fsys := afero.NewMemMapFs() - // Mock config patterns - Config.Db.Seed.SqlPaths = []string{"[*!#@D#"} - - // Run test - files, err := GetSeedFiles(fsys) - - // Check error - assert.Nil(t, err) - // The resuling seed list should be empty - assert.ElementsMatch(t, []string{}, files) - }) - - t.Run("returns empty list if no files match", func(t *testing.T) { - // Setup in-memory fs - fsys := afero.NewMemMapFs() - // Mock config patterns - Config.Db.Seed.SqlPaths = []string{"seeds/*.sql"} - - // Run test - files, err := GetSeedFiles(fsys) - - // Check error - assert.NoError(t, err) - // Validate files - assert.Empty(t, files) - }) -} diff --git a/pkg/config/config.go b/pkg/config/config.go index 3636e7e9c..06c3baec3 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -16,6 +16,7 @@ import ( "os" "path/filepath" "regexp" + 
"sort" "strconv" "strings" "text/template" @@ -172,8 +173,9 @@ type ( } seed struct { - Enabled bool `toml:"enabled"` - SqlPaths []string `toml:"sql_paths"` + Enabled bool `toml:"enabled"` + GlobPatterns []string `toml:"sql_paths"` + SqlPaths []string `toml:"-"` } pooler struct { @@ -483,8 +485,8 @@ func NewConfig(editors ...ConfigEditor) config { SecretKeyBase: "EAx3IQ/wRG1v47ZD4NE4/9RzBI8Jmil3x0yhcW4V2NHBP6c2iPIzwjofi2Ep4HIG", }, Seed: seed{ - Enabled: true, - SqlPaths: []string{"./seed.sql"}, + Enabled: true, + GlobPatterns: []string{"./seed.sql"}, }, }, Realtime: realtime{ @@ -708,6 +710,9 @@ func (c *config) Load(path string, fsys fs.FS) error { } c.Functions[slug] = function } + if err := c.Db.Seed.loadSeedPaths(builder.SupabaseDirPath, fsys); err != nil { + return err + } if err := c.baseConfig.Validate(); err != nil { return err } @@ -1041,6 +1046,40 @@ func loadEnvIfExists(path string) error { return nil } +// Match the glob patterns from the config to get a deduplicated +// array of all migrations files to apply in the declared order. +func (c *seed) loadSeedPaths(basePath string, fsys fs.FS) error { + if !c.Enabled { + return nil + } + if c.SqlPaths != nil { + // Reuse already allocated array + c.SqlPaths = c.SqlPaths[:0] + } + set := make(map[string]struct{}) + for _, pattern := range c.GlobPatterns { + if !filepath.IsAbs(pattern) { + pattern = filepath.Join(basePath, pattern) + } + matches, err := fs.Glob(fsys, pattern) + if err != nil { + return errors.Errorf("failed to apply glob pattern: %w", err) + } + if len(matches) == 0 { + fmt.Fprintln(os.Stderr, "No seed files matched pattern:", pattern) + } + sort.Strings(matches) + // Remove duplicates + for _, item := range matches { + if _, exists := set[item]; !exists { + set[item] = struct{}{} + c.SqlPaths = append(c.SqlPaths, item) + } + } + } + return nil +} + func (h *hookConfig) HandleHook(hookType string) error { // If not enabled do nothing if !h.Enabled { diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 733b3c7b7..7d31d77ae 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -3,6 +3,7 @@ package config import ( "bytes" _ "embed" + "path" "strings" "testing" fs "testing/fstest" @@ -247,3 +248,90 @@ func TestValidateHookURI(t *testing.T) { }) } } + +func TestLoadSeedPaths(t *testing.T) { + t.Run("returns seed files matching patterns", func(t *testing.T) { + // Setup in-memory fs + fsys := fs.MapFS{ + "supabase/seeds/seed1.sql": &fs.MapFile{Data: []byte("INSERT INTO table1 VALUES (1);")}, + "supabase/seeds/seed2.sql": &fs.MapFile{Data: []byte("INSERT INTO table2 VALUES (2);")}, + "supabase/seeds/seed3.sql": &fs.MapFile{Data: []byte("INSERT INTO table2 VALUES (2);")}, + "supabase/seeds/another.sql": &fs.MapFile{Data: []byte("INSERT INTO table2 VALUES (2);")}, + "supabase/seeds/ignore.sql": &fs.MapFile{Data: []byte("INSERT INTO table3 VALUES (3);")}, + } + // Mock config patterns + config := seed{ + Enabled: true, + GlobPatterns: []string{ + "seeds/seed[12].sql", + "seeds/ano*.sql", + }, + } + // Run test + err := config.loadSeedPaths("supabase", fsys) + // Check error + assert.NoError(t, err) + // Validate files + assert.ElementsMatch(t, []string{ + "supabase/seeds/seed1.sql", + "supabase/seeds/seed2.sql", + "supabase/seeds/another.sql", + }, config.SqlPaths) + }) + t.Run("returns seed files matching patterns skip duplicates", func(t *testing.T) { + // Setup in-memory fs + fsys := fs.MapFS{ + "supabase/seeds/seed1.sql": &fs.MapFile{Data: []byte("INSERT INTO table1 VALUES (1);")}, 
+ "supabase/seeds/seed2.sql": &fs.MapFile{Data: []byte("INSERT INTO table2 VALUES (2);")}, + "supabase/seeds/seed3.sql": &fs.MapFile{Data: []byte("INSERT INTO table2 VALUES (2);")}, + "supabase/seeds/another.sql": &fs.MapFile{Data: []byte("INSERT INTO table2 VALUES (2);")}, + "supabase/seeds/ignore.sql": &fs.MapFile{Data: []byte("INSERT INTO table3 VALUES (3);")}, + } + // Mock config patterns + config := seed{ + Enabled: true, + GlobPatterns: []string{ + "seeds/seed[12].sql", + "seeds/ano*.sql", + "seeds/seed*.sql", + }, + } + // Run test + err := config.loadSeedPaths("supabase", fsys) + // Check error + assert.NoError(t, err) + // Validate files + assert.ElementsMatch(t, []string{ + "supabase/seeds/seed1.sql", + "supabase/seeds/seed2.sql", + "supabase/seeds/another.sql", + "supabase/seeds/seed3.sql", + }, config.SqlPaths) + }) + + t.Run("returns error on invalid pattern", func(t *testing.T) { + // Setup in-memory fs + fsys := fs.MapFS{} + // Mock config patterns + config := seed{Enabled: true, GlobPatterns: []string{"[*!#@D#"}} + // Run test + err := config.loadSeedPaths("", fsys) + // Check error + assert.ErrorIs(t, err, path.ErrBadPattern) + // The resuling seed list should be empty + assert.Empty(t, config.SqlPaths) + }) + + t.Run("returns empty list if no files match", func(t *testing.T) { + // Setup in-memory fs + fsys := fs.MapFS{} + // Mock config patterns + config := seed{Enabled: true, GlobPatterns: []string{"seeds/*.sql"}} + // Run test + err := config.loadSeedPaths("", fsys) + // Check error + assert.NoError(t, err) + // Validate files + assert.Empty(t, config.SqlPaths) + }) +} From 0efc21dcb23a5935d7d96a5f01643058b931ccb0 Mon Sep 17 00:00:00 2001 From: Ivan Vasilov Date: Fri, 4 Oct 2024 15:21:35 +0200 Subject: [PATCH 18/28] chore: Bump studio to the latest image version 20240930 (#2720) Update studio version to 20240930 --- pkg/config/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/config/constants.go b/pkg/config/constants.go index 9d91a8489..c51e19fbd 100644 --- a/pkg/config/constants.go +++ b/pkg/config/constants.go @@ -10,7 +10,7 @@ const ( inbucketImage = "inbucket/inbucket:3.0.3" postgrestImage = "postgrest/postgrest:v12.2.0" pgmetaImage = "supabase/postgres-meta:v0.83.2" - studioImage = "supabase/studio:20240923-2e3e90c" + studioImage = "supabase/studio:20240930-16f2b8e" imageProxyImage = "darthsim/imgproxy:v3.8.0" edgeRuntimeImage = "supabase/edge-runtime:v1.58.11" vectorImage = "timberio/vector:0.28.1-alpine" From 333a2ca5522e493cd35a853f258e9fd9b5098558 Mon Sep 17 00:00:00 2001 From: Andrew Valleteau Date: Fri, 4 Oct 2024 18:14:57 +0200 Subject: [PATCH 19/28] fix: email templates for monorepos (#2723) * fix: email templates for monorepos Fixes: https://github.com/supabase/cli/issues/2721 * Update pkg/config/config.go * Revert "Update pkg/config/config.go" This reverts commit 2dbc9f0e1dd5ee1b3f14f9cc17e01c5ec0fe984e. 
--------- Co-authored-by: Han Qiao Co-authored-by: Qiao Han --- pkg/config/config.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index 06c3baec3..0b2468224 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -710,10 +710,11 @@ func (c *config) Load(path string, fsys fs.FS) error { } c.Functions[slug] = function } + if err := c.Db.Seed.loadSeedPaths(builder.SupabaseDirPath, fsys); err != nil { return err } - if err := c.baseConfig.Validate(); err != nil { + if err := c.baseConfig.Validate(fsys); err != nil { return err } c.Remotes = make(map[string]baseConfig, len(c.Overrides)) @@ -730,7 +731,7 @@ func (c *config) Load(path string, fsys fs.FS) error { } else if undecoded := metadata.Undecoded(); len(undecoded) > 0 { fmt.Fprintf(os.Stderr, "Unknown config fields: %+v\n", undecoded) } - if err := base.Validate(); err != nil { + if err := base.Validate(fsys); err != nil { return err } c.Remotes[name] = base @@ -738,7 +739,7 @@ func (c *config) Load(path string, fsys fs.FS) error { return nil } -func (c *baseConfig) Validate() error { +func (c *baseConfig) Validate(fsys fs.FS) error { if c.ProjectId == "" { return errors.New("Missing required field in config: project_id") } else if sanitized := sanitizeProjectId(c.ProjectId); sanitized != c.ProjectId { @@ -833,8 +834,10 @@ func (c *baseConfig) Validate() error { } // Validate email config for name, tmpl := range c.Auth.Email.Template { - if len(tmpl.ContentPath) > 0 && !fs.ValidPath(filepath.Clean(tmpl.ContentPath)) { - return errors.Errorf("Invalid config for auth.email.%s.content_path: %s", name, tmpl.ContentPath) + if len(tmpl.ContentPath) > 0 { + if _, err = fs.Stat(fsys, filepath.Clean(tmpl.ContentPath)); err != nil { + return errors.Errorf("Invalid config for auth.email.%s.content_path: %s", name, tmpl.ContentPath) + } } } if c.Auth.Email.Smtp.Pass, err = maybeLoadEnv(c.Auth.Email.Smtp.Pass); err != nil { From 9365527e770dcf21d88d6f2a37c5dd69038f1d68 Mon Sep 17 00:00:00 2001 From: Qiao Han Date: Sun, 6 Oct 2024 16:08:00 +0800 Subject: [PATCH 20/28] fix: inspect storage container before seeding buckets --- internal/db/reset/reset.go | 11 +++-- internal/db/reset/reset_test.go | 74 ++++++++++++++++++++++++++++++--- 2 files changed, 76 insertions(+), 9 deletions(-) diff --git a/internal/db/reset/reset.go b/internal/db/reset/reset.go index d53392ae9..2622e77ff 100644 --- a/internal/db/reset/reset.go +++ b/internal/db/reset/reset.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "github.com/docker/docker/errdefs" @@ -54,9 +55,11 @@ func Run(ctx context.Context, version string, config pgconn.Config, fsys afero.F return err } // Seed objects from supabase/buckets directory - if utils.Config.Storage.Enabled { - if err := start.WaitForHealthyService(ctx, 30*time.Second, utils.StorageId); err != nil { - return err + if resp, err := utils.Docker.ContainerInspect(ctx, utils.StorageId); err == nil { + if resp.State.Health == nil || resp.State.Health.Status != types.Healthy { + if err := start.WaitForHealthyService(ctx, 30*time.Second, utils.StorageId); err != nil { + return err + } } if err := buckets.Run(ctx, "", false, fsys); err != nil { return err @@ -212,7 +215,7 @@ func restartServices(ctx context.Context) error { services := listServicesToRestart() result := utils.WaitAll(services, func(id string) error { if err := 
utils.Docker.ContainerRestart(ctx, id, container.StopOptions{}); err != nil && !errdefs.IsNotFound(err) { - return errors.Errorf("Failed to restart %s: %w", id, err) + return errors.Errorf("failed to restart %s: %w", id, err) } return nil }) diff --git a/internal/db/reset/reset_test.go b/internal/db/reset/reset_test.go index ace03c9b4..c65aa0ef9 100644 --- a/internal/db/reset/reset_test.go +++ b/internal/db/reset/reset_test.go @@ -24,6 +24,7 @@ import ( "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/pkg/migration" "github.com/supabase/cli/pkg/pgtest" + "github.com/supabase/cli/pkg/storage" ) func TestResetCommand(t *testing.T) { @@ -38,6 +39,69 @@ func TestResetCommand(t *testing.T) { Database: "postgres", } + t.Run("seeds storage after reset", func(t *testing.T) { + utils.DbId = "test-reset" + utils.ConfigId = "test-config" + utils.Config.Db.MajorVersion = 15 + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId). + Reply(http.StatusOK). + JSON(types.ContainerJSON{}) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId). + Reply(http.StatusOK) + gock.New(utils.Docker.DaemonHost()). + Delete("/v" + utils.Docker.ClientVersion() + "/volumes/" + utils.DbId). + Reply(http.StatusOK) + apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Db.Image), utils.DbId) + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + // Restarts services + utils.StorageId = "test-storage" + utils.GotrueId = "test-auth" + utils.RealtimeId = "test-realtime" + utils.PoolerId = "test-pooler" + for _, container := range listServicesToRestart() { + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + container + "/restart"). + Reply(http.StatusOK) + } + // Seeds storage + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.StorageId + "/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: types.Healthy}, + }, + }}) + gock.New(utils.Config.Api.ExternalUrl). + Get("/storage/v1/bucket"). + Reply(http.StatusOK). 
+ JSON([]storage.BucketResponse{}) + // Run test + err := Run(context.Background(), "", dbConfig, fsys, conn.Intercept) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + t.Run("throws error on context canceled", func(t *testing.T) { // Setup in-memory fs fsys := afero.NewMemMapFs() @@ -225,7 +289,7 @@ func TestRestartDatabase(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) // Restarts services @@ -260,7 +324,7 @@ func TestRestartDatabase(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) // Restarts services @@ -279,9 +343,9 @@ func TestRestartDatabase(t *testing.T) { // Run test err := RestartDatabase(context.Background(), io.Discard) // Check error - assert.ErrorContains(t, err, "Failed to restart "+utils.StorageId) - assert.ErrorContains(t, err, "Failed to restart "+utils.GotrueId) - assert.ErrorContains(t, err, "Failed to restart "+utils.RealtimeId) + assert.ErrorContains(t, err, "failed to restart "+utils.StorageId) + assert.ErrorContains(t, err, "failed to restart "+utils.GotrueId) + assert.ErrorContains(t, err, "failed to restart "+utils.RealtimeId) assert.Empty(t, apitest.ListUnmatchedRequests()) }) From 73b591c6ecd428301735e089d3aa7af71bc7ce5f Mon Sep 17 00:00:00 2001 From: Qiao Han Date: Sun, 6 Oct 2024 16:08:41 +0800 Subject: [PATCH 21/28] chore: use healthy enum from docker api --- internal/db/diff/diff_test.go | 6 +++--- internal/db/start/start_test.go | 4 ++-- internal/migration/squash/squash_test.go | 6 +++--- internal/start/start_test.go | 6 +++--- internal/status/status.go | 3 ++- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/internal/db/diff/diff_test.go b/internal/db/diff/diff_test.go index d5e8c6a46..6ada103c7 100644 --- a/internal/db/diff/diff_test.go +++ b/internal/db/diff/diff_test.go @@ -55,7 +55,7 @@ func TestRun(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Realtime.Image), "test-shadow-realtime") @@ -267,7 +267,7 @@ func TestDiffDatabase(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) gock.New(utils.Docker.DaemonHost()). @@ -303,7 +303,7 @@ At statement 0: create schema public`) JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) gock.New(utils.Docker.DaemonHost()). 
diff --git a/internal/db/start/start_test.go b/internal/db/start/start_test.go index 555141192..8d1816b0b 100644 --- a/internal/db/start/start_test.go +++ b/internal/db/start/start_test.go @@ -78,7 +78,7 @@ func TestStartDatabase(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) apitest.MockDockerStart(utils.Docker, utils.GetRegistryImageUrl(utils.Config.Realtime.Image), "test-realtime") @@ -126,7 +126,7 @@ func TestStartDatabase(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) // Run test diff --git a/internal/migration/squash/squash_test.go b/internal/migration/squash/squash_test.go index c85f014c0..cc0461f3b 100644 --- a/internal/migration/squash/squash_test.go +++ b/internal/migration/squash/squash_test.go @@ -61,7 +61,7 @@ func TestSquashCommand(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) gock.New(utils.Docker.DaemonHost()). @@ -251,7 +251,7 @@ func TestSquashMigrations(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) gock.New(utils.Docker.DaemonHost()). @@ -286,7 +286,7 @@ func TestSquashMigrations(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) gock.New(utils.Docker.DaemonHost()). diff --git a/internal/start/start_test.go b/internal/start/start_test.go index 654831ccb..079eb3c79 100644 --- a/internal/start/start_test.go +++ b/internal/start/start_test.go @@ -160,7 +160,7 @@ func TestDatabaseStart(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) } @@ -177,7 +177,7 @@ func TestDatabaseStart(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) gock.New(utils.Config.Api.ExternalUrl). 
@@ -225,7 +225,7 @@ func TestDatabaseStart(t *testing.T) { JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ State: &types.ContainerState{ Running: true, - Health: &types.Health{Status: "healthy"}, + Health: &types.Health{Status: types.Healthy}, }, }}) // Run test diff --git a/internal/status/status.go b/internal/status/status.go index e78af5cd2..1b9d02f90 100644 --- a/internal/status/status.go +++ b/internal/status/status.go @@ -14,6 +14,7 @@ import ( "sync" "time" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/go-errors/errors" "github.com/spf13/afero" @@ -114,7 +115,7 @@ func assertContainerHealthy(ctx context.Context, container string) error { return errors.Errorf("failed to inspect container health: %w", err) } else if !resp.State.Running { return errors.Errorf("%s container is not running: %s", container, resp.State.Status) - } else if resp.State.Health != nil && resp.State.Health.Status != "healthy" { + } else if resp.State.Health != nil && resp.State.Health.Status != types.Healthy { return errors.Errorf("%s container is not ready: %s", container, resp.State.Health.Status) } return nil From b04503926c2dbe2815d2b25b0d800aa1a775c547 Mon Sep 17 00:00:00 2001 From: Qiao Han Date: Sun, 6 Oct 2024 16:08:58 +0800 Subject: [PATCH 22/28] chore: remove syslog config --- internal/db/reset/reset.go | 4 ---- internal/db/start/start.go | 2 -- 2 files changed, 6 deletions(-) diff --git a/internal/db/reset/reset.go b/internal/db/reset/reset.go index 2622e77ff..88e595047 100644 --- a/internal/db/reset/reset.go +++ b/internal/db/reset/reset.go @@ -115,10 +115,6 @@ func resetDatabase15(ctx context.Context, version string, fsys afero.Fs, options if err := utils.Docker.VolumeRemove(ctx, utils.DbId, true); err != nil { return errors.Errorf("failed to remove volume: %w", err) } - // Skip syslog if vector container is not started - if _, err := utils.Docker.ContainerInspect(ctx, utils.VectorId); err != nil { - utils.Config.Analytics.Enabled = false - } config := start.NewContainerConfig() hostConfig := start.NewHostConfig() networkingConfig := network.NetworkingConfig{ diff --git a/internal/db/start/start.go b/internal/db/start/start.go index 74e40554a..912f845df 100644 --- a/internal/db/start/start.go +++ b/internal/db/start/start.go @@ -43,8 +43,6 @@ func Run(ctx context.Context, fsys afero.Fs) error { } else if !errors.Is(err, utils.ErrNotRunning) { return err } - // Skip logflare container in db start - utils.Config.Analytics.Enabled = false err := StartDatabase(ctx, fsys, os.Stderr) if err != nil { if err := utils.DockerRemoveAll(context.Background(), os.Stderr, utils.Config.ProjectId); err != nil { From 887b5bb921ee4df787ac4c6045d00360112b3470 Mon Sep 17 00:00:00 2001 From: Qiao Han Date: Fri, 4 Oct 2024 23:14:51 +0800 Subject: [PATCH 23/28] feat: track seeded files in migration table --- internal/db/push/push.go | 33 +++++++-- internal/migration/apply/apply.go | 6 +- internal/migration/apply/apply_test.go | 62 ----------------- pkg/migration/file.go | 64 +++++++++++++---- pkg/migration/history.go | 12 ++++ pkg/migration/seed.go | 58 ++++++++++++++-- pkg/migration/seed_test.go | 95 +++++++++++++++++++++++--- pkg/pgtest/mock.go | 8 ++- pkg/pgxv5/rows.go | 22 +++--- 9 files changed, 252 insertions(+), 108 deletions(-) diff --git a/internal/db/push/push.go b/internal/db/push/push.go index 5439f16cc..226a76751 100644 --- a/internal/db/push/push.go +++ b/internal/db/push/push.go @@ -29,7 +29,14 @@ func Run(ctx context.Context, dryRun, 
ignoreVersionMismatch bool, includeRoles, if err != nil { return err } - if len(pending) == 0 { + var seeds []migration.SeedFile + if includeSeed { + seeds, err = migration.GetPendingSeeds(ctx, utils.Config.Db.Seed.SqlPaths, conn, afero.NewIOFS(fsys)) + if err != nil { + return err + } + } + if len(pending) == 0 && len(seeds) == 0 { fmt.Println("Remote database is up to date.") return nil } @@ -38,10 +45,13 @@ func Run(ctx context.Context, dryRun, ignoreVersionMismatch bool, includeRoles, if includeRoles { fmt.Fprintln(os.Stderr, "Would create custom roles "+utils.Bold(utils.CustomRolesPath)+"...") } - fmt.Fprintln(os.Stderr, "Would push these migrations:") - fmt.Fprint(os.Stderr, utils.Bold(confirmPushAll(pending))) - if includeSeed { - fmt.Fprintf(os.Stderr, "Would seed data %v...\n", utils.Config.Db.Seed.SqlPaths) + if len(pending) > 0 { + fmt.Fprintln(os.Stderr, "Would push these migrations:") + fmt.Fprint(os.Stderr, utils.Bold(confirmPushAll(pending))) + } + if includeSeed && len(seeds) > 0 { + fmt.Fprintln(os.Stderr, "Would seed these files:") + fmt.Fprint(os.Stderr, utils.Bold(confirmSeedAll(seeds))) } } else { msg := fmt.Sprintf("Do you want to push these migrations to the remote database?\n%s\n", confirmPushAll(pending)) @@ -59,7 +69,7 @@ func Run(ctx context.Context, dryRun, ignoreVersionMismatch bool, includeRoles, return err } if includeSeed { - if err := apply.SeedDatabase(ctx, conn, fsys); err != nil { + if err := migration.SeedData(ctx, seeds, conn, afero.NewIOFS(fsys)); err != nil { return err } } @@ -75,3 +85,14 @@ func confirmPushAll(pending []string) (msg string) { } return msg } + +func confirmSeedAll(pending []migration.SeedFile) (msg string) { + for _, seed := range pending { + notice := seed.Path + if seed.Dirty { + notice += " (hash update)" + } + msg += fmt.Sprintf(" • %s\n", notice) + } + return msg +} diff --git a/internal/migration/apply/apply.go b/internal/migration/apply/apply.go index 9ead81dd5..f8c466259 100644 --- a/internal/migration/apply/apply.go +++ b/internal/migration/apply/apply.go @@ -27,7 +27,11 @@ func MigrateAndSeed(ctx context.Context, version string, conn *pgx.Conn, fsys af } func SeedDatabase(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { - return migration.SeedData(ctx, utils.Config.Db.Seed.SqlPaths, conn, afero.NewIOFS(fsys)) + seeds, err := migration.GetPendingSeeds(ctx, utils.Config.Db.Seed.SqlPaths, conn, afero.NewIOFS(fsys)) + if err != nil { + return err + } + return migration.SeedData(ctx, seeds, conn, afero.NewIOFS(fsys)) } func CreateCustomRoles(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { diff --git a/internal/migration/apply/apply_test.go b/internal/migration/apply/apply_test.go index 25362c5fe..286093743 100644 --- a/internal/migration/apply/apply_test.go +++ b/internal/migration/apply/apply_test.go @@ -6,7 +6,6 @@ import ( "path/filepath" "testing" - "github.com/jackc/pgerrcode" "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -75,64 +74,3 @@ func TestMigrateDatabase(t *testing.T) { assert.ErrorIs(t, err, os.ErrPermission) }) } - -func TestSeedDatabase(t *testing.T) { - seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") - utils.Config.Db.Seed.SqlPaths = []string{seedPath} - - t.Run("seeds from file", func(t *testing.T) { - // Setup in-memory fs - fsys := afero.NewMemMapFs() - // Setup seed file - sql := "INSERT INTO employees(name) VALUES ('Alice')" - require.NoError(t, afero.WriteFile(fsys, seedPath, []byte(sql), 0644)) - // Setup mock 
postgres - conn := pgtest.NewConn() - defer conn.Close(t) - conn.Query(sql). - Reply("INSERT 0 1") - // Run test - err := SeedDatabase(context.Background(), conn.MockClient(t), fsys) - // Check error - assert.NoError(t, err) - }) - - t.Run("ignores missing seed", func(t *testing.T) { - sqlPaths := utils.Config.Db.Seed.SqlPaths - utils.Config.Db.Seed.SqlPaths = []string{} - t.Cleanup(func() { utils.Config.Db.Seed.SqlPaths = sqlPaths }) - // Setup in-memory fs - fsys := afero.NewMemMapFs() - // Run test - err := SeedDatabase(context.Background(), nil, fsys) - // Check error - assert.NoError(t, err) - }) - - t.Run("throws error on read failure", func(t *testing.T) { - // Setup in-memory fs - fsys := &fstest.OpenErrorFs{DenyPath: seedPath} - _, _ = fsys.Create(seedPath) - // Run test - err := SeedDatabase(context.Background(), nil, fsys) - // Check error - assert.ErrorIs(t, err, os.ErrPermission) - }) - - t.Run("throws error on insert failure", func(t *testing.T) { - // Setup in-memory fs - fsys := afero.NewMemMapFs() - // Setup seed file - sql := "INSERT INTO employees(name) VALUES ('Alice')" - require.NoError(t, afero.WriteFile(fsys, seedPath, []byte(sql), 0644)) - // Setup mock postgres - conn := pgtest.NewConn() - defer conn.Close(t) - conn.Query(sql). - ReplyError(pgerrcode.NotNullViolation, `null value in column "age" of relation "employees"`) - // Run test - err := SeedDatabase(context.Background(), conn.MockClient(t), fsys) - // Check error - assert.ErrorContains(t, err, `ERROR: null value in column "age" of relation "employees" (SQLSTATE 23502)`) - }) -} diff --git a/pkg/migration/file.go b/pkg/migration/file.go index 04a945565..79da1c86a 100644 --- a/pkg/migration/file.go +++ b/pkg/migration/file.go @@ -2,6 +2,8 @@ package migration import ( "context" + "crypto/sha256" + "encoding/hex" "io" "io/fs" "path/filepath" @@ -24,6 +26,22 @@ type MigrationFile struct { var migrateFilePattern = regexp.MustCompile(`^([0-9]+)_(.*)\.sql$`) func NewMigrationFromFile(path string, fsys fs.FS) (*MigrationFile, error) { + lines, err := parseFile(path, fsys) + if err != nil { + return nil, err + } + file := MigrationFile{Statements: lines} + // Parse version from file name + filename := filepath.Base(path) + matches := migrateFilePattern.FindStringSubmatch(filename) + if len(matches) > 2 { + file.Version = matches[1] + file.Name = matches[2] + } + return &file, nil +} + +func parseFile(path string, fsys fs.FS) ([]string, error) { sql, err := fsys.Open(path) if err != nil { return nil, errors.Errorf("failed to open migration file: %w", err) @@ -37,17 +55,7 @@ func NewMigrationFromFile(path string, fsys fs.FS) (*MigrationFile, error) { } } } - file, err := NewMigrationFromReader(sql) - if err == nil { - // Parse version from file name - filename := filepath.Base(path) - matches := migrateFilePattern.FindStringSubmatch(filename) - if len(matches) > 2 { - file.Version = matches[1] - file.Name = matches[2] - } - } - return file, err + return parser.SplitAndTrim(sql) } func NewMigrationFromReader(sql io.Reader) (*MigrationFile, error) { @@ -112,12 +120,40 @@ func (m *MigrationFile) insertVersionSQL(conn *pgx.Conn, batch *pgconn.Batch) er return nil } -func (m *MigrationFile) ExecBatchWithCache(ctx context.Context, conn *pgx.Conn) error { +type SeedFile struct { + Path string + Hash string + Dirty bool `db:"-"` +} + +func NewSeedFile(path string, fsys fs.FS) (*SeedFile, error) { + sql, err := fsys.Open(path) + if err != nil { + return nil, errors.Errorf("failed to open seed file: %w", err) + } + defer 
sql.Close() + hash := sha256.New() + if _, err := io.Copy(hash, sql); err != nil { + return nil, errors.Errorf("failed to hash file: %w", err) + } + digest := hex.EncodeToString(hash.Sum(nil)) + return &SeedFile{Path: path, Hash: digest}, nil +} + +func (m *SeedFile) ExecBatchWithCache(ctx context.Context, conn *pgx.Conn, fsys fs.FS) error { + // Parse each file individually to reduce memory usage + lines, err := parseFile(m.Path, fsys) + if err != nil { + return err + } // Data statements don't mutate schemas, safe to use statement cache batch := pgx.Batch{} - for _, line := range m.Statements { - batch.Queue(line) + if !m.Dirty { + for _, line := range lines { + batch.Queue(line) + } } + batch.Queue(UPSERT_SEED_FILE, m.Path, m.Hash) // No need to track version here because there are no schema changes if err := conn.SendBatch(ctx, &batch).Close(); err != nil { return errors.Errorf("failed to send batch: %w", err) diff --git a/pkg/migration/history.go b/pkg/migration/history.go index 6dc8b5fc8..832651874 100644 --- a/pkg/migration/history.go +++ b/pkg/migration/history.go @@ -21,6 +21,9 @@ const ( TRUNCATE_VERSION_TABLE = "TRUNCATE supabase_migrations.schema_migrations" SELECT_VERSION_TABLE = "SELECT * FROM supabase_migrations.schema_migrations" LIST_MIGRATION_VERSION = "SELECT version FROM supabase_migrations.schema_migrations ORDER BY version" + CREATE_SEED_TABLE = "CREATE TABLE IF NOT EXISTS supabase_migrations.seed_files (path text NOT NULL PRIMARY KEY, hash text NOT NULL)" + UPSERT_SEED_FILE = "INSERT INTO supabase_migrations.seed_files(path, hash) VALUES($1, $2) ON CONFLICT (path) DO UPDATE SET hash = EXCLUDED.hash" + SELECT_SEED_TABLE = "SELECT path, hash FROM supabase_migrations.seed_files" ) // TODO: support overriding `supabase_migrations.schema_migrations` with user defined . 
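The hash recorded in supabase_migrations.seed_files is the hex-encoded SHA-256 of the seed file's contents, computed in NewSeedFile above by streaming the file through the hasher. A self-contained sketch of the same digest computation, using a hypothetical path:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("supabase/seed.sql") // hypothetical seed file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	h := sha256.New()
	// io.Copy streams the file into the hasher, so large seed files
	// are never loaded fully into memory just to compute a digest.
	if _, err := io.Copy(h, f); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}

On a later push, an unchanged digest lets the seed file be skipped entirely, while a changed digest marks it dirty so only the stored hash is upserted and its statements are not re-run.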
@@ -33,6 +36,7 @@ func CreateMigrationTable(ctx context.Context, conn *pgx.Conn) error { batch.ExecParams(CREATE_VERSION_TABLE, nil, nil, nil, nil) batch.ExecParams(ADD_STATEMENTS_COLUMN, nil, nil, nil, nil) batch.ExecParams(ADD_NAME_COLUMN, nil, nil, nil, nil) + batch.ExecParams(CREATE_SEED_TABLE, nil, nil, nil, nil) if _, err := conn.PgConn().ExecBatch(ctx, &batch).ReadAll(); err != nil { return errors.Errorf("failed to create migration table: %w", err) } @@ -46,3 +50,11 @@ func ReadMigrationTable(ctx context.Context, conn *pgx.Conn) ([]MigrationFile, e } return pgxv5.CollectRows[MigrationFile](rows) } + +func ReadSeedTable(ctx context.Context, conn *pgx.Conn) ([]SeedFile, error) { + rows, err := conn.Query(ctx, SELECT_SEED_TABLE) + if err != nil { + return nil, errors.Errorf("failed to read seed table: %w", err) + } + return pgxv5.CollectRows[SeedFile](rows) +} diff --git a/pkg/migration/seed.go b/pkg/migration/seed.go index 62a2c875b..6fd5aa43c 100644 --- a/pkg/migration/seed.go +++ b/pkg/migration/seed.go @@ -7,16 +7,62 @@ import ( "os" "path/filepath" + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgerrcode" "github.com/jackc/pgx/v4" ) -func SeedData(ctx context.Context, pending []string, conn *pgx.Conn, fsys fs.FS) error { - for _, path := range pending { - fmt.Fprintf(os.Stderr, "Seeding data from %s...\n", path) +func getRemoteSeeds(ctx context.Context, conn *pgx.Conn) (map[string]string, error) { + remotes, err := ReadSeedTable(ctx, conn) + if err != nil { + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) && pgErr.Code == pgerrcode.UndefinedTable { + // If seed table is undefined, the remote project has no migrations + return map[string]string{}, nil + } + return map[string]string{}, err + } + applied := make(map[string]string, len(remotes)) + for _, seed := range remotes { + applied[seed.Path] = seed.Hash + } + return applied, nil +} + +func GetPendingSeeds(ctx context.Context, locals []string, conn *pgx.Conn, fsys fs.FS) ([]SeedFile, error) { + applied, err := getRemoteSeeds(ctx, conn) + if err != nil { + return nil, err + } + var pending []SeedFile + for _, path := range locals { + seed, err := NewSeedFile(path, fsys) + if err != nil { + return nil, err + } + if hash, exists := applied[seed.Path]; exists { + // Skip seed files that already exist + if hash == seed.Hash { + continue + } + // Mark seed file as dirty + seed.Dirty = true + } + pending = append(pending, *seed) + } + return pending, nil +} + +func SeedData(ctx context.Context, pending []SeedFile, conn *pgx.Conn, fsys fs.FS) error { + for _, seed := range pending { + if seed.Dirty { + fmt.Fprintf(os.Stderr, "Updating seed file hash %s...\n", seed.Path) + } else { + fmt.Fprintf(os.Stderr, "Seeding data from %s...\n", seed.Path) + } // Batch seed commands, safe to use statement cache - if seed, err := NewMigrationFromFile(path, fsys); err != nil { - return err - } else if err := seed.ExecBatchWithCache(ctx, conn); err != nil { + if err := seed.ExecBatchWithCache(ctx, conn, fsys); err != nil { return err } } diff --git a/pkg/migration/seed_test.go b/pkg/migration/seed_test.go index e81d814a9..db1865ace 100644 --- a/pkg/migration/seed_test.go +++ b/pkg/migration/seed_test.go @@ -8,45 +8,122 @@ import ( fs "testing/fstest" "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v4" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/supabase/cli/pkg/pgtest" ) //go:embed testdata/seed.sql var testSeed string -func TestSeedData(t *testing.T) { +func 
TestPendingSeeds(t *testing.T) { pending := []string{"testdata/seed.sql"} - t.Run("seeds from file", func(t *testing.T) { + t.Run("finds new seeds", func(t *testing.T) { // Setup mock postgres conn := pgtest.NewConn() defer conn.Close(t) - conn.Query(testSeed). - Reply("INSERT 0 1") + conn.Query(SELECT_SEED_TABLE). + Reply("SELECT 0") + // Run test + seeds, err := GetPendingSeeds(context.Background(), pending, conn.MockClient(t), testMigrations) + // Check error + assert.NoError(t, err) + require.Len(t, seeds, 1) + assert.Equal(t, seeds[0].Path, pending[0]) + assert.Equal(t, seeds[0].Hash, "61868484fc0ddca2a2022217629a9fd9a4cf1ca479432046290797d6d40ffcc3") + assert.False(t, seeds[0].Dirty) + }) + + t.Run("finds dirty seeds", func(t *testing.T) { + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(SELECT_SEED_TABLE). + Reply("SELECT 1", SeedFile{Path: pending[0], Hash: "outdated"}) // Run test - err := SeedData(context.Background(), pending, conn.MockClient(t), testMigrations) + seeds, err := GetPendingSeeds(context.Background(), pending, conn.MockClient(t), testMigrations) // Check error assert.NoError(t, err) + require.Len(t, seeds, 1) + assert.Equal(t, seeds[0].Path, pending[0]) + assert.Equal(t, seeds[0].Hash, "61868484fc0ddca2a2022217629a9fd9a4cf1ca479432046290797d6d40ffcc3") + assert.True(t, seeds[0].Dirty) + }) + + t.Run("skips applied seed", func(t *testing.T) { + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(SELECT_SEED_TABLE). + Reply("SELECT 1", SeedFile{Path: pending[0], Hash: "61868484fc0ddca2a2022217629a9fd9a4cf1ca479432046290797d6d40ffcc3"}) + // Run test + seeds, err := GetPendingSeeds(context.Background(), pending, conn.MockClient(t), testMigrations) + // Check error + assert.NoError(t, err) + require.Empty(t, seeds) + }) + + t.Run("throws error on missing seed table", func(t *testing.T) { + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(SELECT_SEED_TABLE). + ReplyError(pgerrcode.UndefinedTable, `relation "seed_files" does not exist`) + // Run test + _, err := GetPendingSeeds(context.Background(), pending, conn.MockClient(t), testMigrations) + // Check error + assert.ErrorContains(t, err, `ERROR: relation "seed_files" does not exist (SQLSTATE 42P01)`) }) t.Run("throws error on missing file", func(t *testing.T) { + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(SELECT_SEED_TABLE). + Reply("SELECT 0") // Setup in-memory fs fsys := fs.MapFS{} // Run test - err := SeedData(context.Background(), pending, nil, fsys) + _, err := GetPendingSeeds(context.Background(), pending, conn.MockClient(t), fsys) // Check error assert.ErrorIs(t, err, os.ErrNotExist) }) +} + +func TestSeedData(t *testing.T) { + t.Run("seeds from file", func(t *testing.T) { + seed := SeedFile{ + Path: "testdata/seed.sql", + Hash: "61868484fc0ddca2a2022217629a9fd9a4cf1ca479432046290797d6d40ffcc3", + Dirty: true, + } + // Setup mock postgres + conn := pgtest.NewConn() + defer conn.Close(t) + conn.Query(UPSERT_SEED_FILE, seed.Path, seed.Hash). 
+ Reply("INSERT 0 1") + // Run test + err := SeedData(context.Background(), []SeedFile{seed}, conn.MockClient(t), testMigrations) + // Check error + assert.NoError(t, err) + }) - t.Run("throws error on insert failure", func(t *testing.T) { + t.Run("throws error on upsert failure", func(t *testing.T) { + seed := SeedFile{ + Path: "testdata/seed.sql", + Hash: "61868484fc0ddca2a2022217629a9fd9a4cf1ca479432046290797d6d40ffcc3", + } // Setup mock postgres conn := pgtest.NewConn() defer conn.Close(t) - conn.Query(testSeed). + conn.Query(testSeed+`;INSERT INTO supabase_migrations.seed_files(path, hash) VALUES( 'testdata/seed.sql' , '61868484fc0ddca2a2022217629a9fd9a4cf1ca479432046290797d6d40ffcc3' ) ON CONFLICT (path) DO UPDATE SET hash = EXCLUDED.hash`). ReplyError(pgerrcode.NotNullViolation, `null value in column "age" of relation "employees"`) // Run test - err := SeedData(context.Background(), pending, conn.MockClient(t), testMigrations) + err := SeedData(context.Background(), []SeedFile{seed}, conn.MockClient(t, func(cc *pgx.ConnConfig) { + cc.PreferSimpleProtocol = true + }), testMigrations) // Check error assert.ErrorContains(t, err, `ERROR: null value in column "age" of relation "employees" (SQLSTATE 23502)`) }) diff --git a/pkg/pgtest/mock.go b/pkg/pgtest/mock.go index b73ad255a..0f5e9bb84 100644 --- a/pkg/pgtest/mock.go +++ b/pkg/pgtest/mock.go @@ -153,7 +153,10 @@ func (r *MockConn) Reply(tag string, rows ...interface{}) *MockConn { } else if t := reflect.TypeOf(rows[0]); t.Kind() == reflect.Struct { s := reflect.ValueOf(rows[0]) for i := 0; i < s.NumField(); i++ { - name := t.Field(i).Name + name := pgxv5.GetColumnName(t.Field(i)) + if len(name) == 0 { + continue + } v := s.Field(i).Interface() if fd := toFieldDescription(v); fd != nil { fd.Name = []byte(name) @@ -182,6 +185,9 @@ func (r *MockConn) Reply(tag string, rows ...interface{}) *MockConn { } else if t := reflect.TypeOf(data); t.Kind() == reflect.Struct { s := reflect.ValueOf(rows[0]) for i := 0; i < s.NumField(); i++ { + if name := pgxv5.GetColumnName(t.Field(i)); len(name) == 0 { + continue + } v := s.Field(i).Interface() if value, oid := r.encodeValueArg(v); oid > 0 { dr.Values = append(dr.Values, value) diff --git a/pkg/pgxv5/rows.go b/pkg/pgxv5/rows.go index 299ce868c..96aa16f5a 100644 --- a/pkg/pgxv5/rows.go +++ b/pkg/pgxv5/rows.go @@ -104,18 +104,11 @@ func appendScanTargets(dstElemValue reflect.Value, scanTargets []any, fldDescs [ return nil, err } } else { - dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey) - if dbTagPresent { - dbTag = strings.Split(dbTag, ",")[0] - } - if dbTag == "-" { + colName := GetColumnName(sf) + if len(colName) == 0 { // Field is ignored, skip it. 
continue } - colName := dbTag - if !dbTagPresent { - colName = sf.Name - } fpos := fieldPosByName(fldDescs, colName) if fpos == -1 || fpos >= len(scanTargets) { return nil, errors.Errorf("cannot find field %s in returned row", colName) @@ -126,3 +119,14 @@ func appendScanTargets(dstElemValue reflect.Value, scanTargets []any, fldDescs [ return scanTargets, err } + +func GetColumnName(sf reflect.StructField) string { + dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey) + if !dbTagPresent { + return sf.Name + } + if dbTag = strings.Split(dbTag, ",")[0]; dbTag != "-" { + return dbTag + } + return "" +} From b906fc56014db30b71a817bc3fb58c448d8c1e34 Mon Sep 17 00:00:00 2001 From: Qiao Han Date: Fri, 4 Oct 2024 23:33:33 +0800 Subject: [PATCH 24/28] chore: reduce code duplication --- internal/db/reset/reset.go | 15 +-------------- internal/db/start/start.go | 6 +++--- internal/db/start/start_test.go | 8 ++++---- internal/migration/apply/apply.go | 6 +++--- 4 files changed, 11 insertions(+), 24 deletions(-) diff --git a/internal/db/reset/reset.go b/internal/db/reset/reset.go index 88e595047..a9d2f4f4f 100644 --- a/internal/db/reset/reset.go +++ b/internal/db/reset/reset.go @@ -100,11 +100,6 @@ func resetDatabase14(ctx context.Context, version string, fsys afero.Fs, options return err } defer conn.Close(context.Background()) - if utils.Config.Db.MajorVersion > 14 { - if err := start.SetupDatabase(ctx, conn, utils.DbId, os.Stderr, fsys); err != nil { - return err - } - } return apply.MigrateAndSeed(ctx, version, conn, fsys) } @@ -131,15 +126,7 @@ func resetDatabase15(ctx context.Context, version string, fsys afero.Fs, options if err := start.WaitForHealthyService(ctx, start.HealthTimeout, utils.DbId); err != nil { return err } - conn, err := utils.ConnectLocalPostgres(ctx, pgconn.Config{}, options...) - if err != nil { - return err - } - defer conn.Close(context.Background()) - if err := start.SetupDatabase(ctx, conn, utils.DbId, os.Stderr, fsys); err != nil { - return err - } - if err := apply.MigrateAndSeed(ctx, version, conn, fsys); err != nil { + if err := start.SetupLocalDatabase(ctx, version, fsys, os.Stderr, options...); err != nil { return err } fmt.Fprintln(os.Stderr, "Restarting containers...") diff --git a/internal/db/start/start.go b/internal/db/start/start.go index 912f845df..5f2364e5d 100644 --- a/internal/db/start/start.go +++ b/internal/db/start/start.go @@ -147,7 +147,7 @@ EOF } // Initialize if we are on PG14 and there's no existing db volume if utils.NoBackupVolume { - if err := setupDatabase(ctx, fsys, w, options...); err != nil { + if err := SetupLocalDatabase(ctx, "", fsys, w, options...); err != nil { return err } } @@ -308,7 +308,7 @@ func initSchema15(ctx context.Context, host string) error { return nil } -func setupDatabase(ctx context.Context, fsys afero.Fs, w io.Writer, options ...func(*pgx.ConnConfig)) error { +func SetupLocalDatabase(ctx context.Context, version string, fsys afero.Fs, w io.Writer, options ...func(*pgx.ConnConfig)) error { conn, err := utils.ConnectLocalPostgres(ctx, pgconn.Config{}, options...) 
if err != nil { return err @@ -317,7 +317,7 @@ func setupDatabase(ctx context.Context, fsys afero.Fs, w io.Writer, options ...f if err := SetupDatabase(ctx, conn, utils.DbId, w, fsys); err != nil { return err } - return apply.MigrateAndSeed(ctx, "", conn, fsys) + return apply.MigrateAndSeed(ctx, version, conn, fsys) } func SetupDatabase(ctx context.Context, conn *pgx.Conn, host string, w io.Writer, fsys afero.Fs) error { diff --git a/internal/db/start/start_test.go b/internal/db/start/start_test.go index 8d1816b0b..24ba7f0ee 100644 --- a/internal/db/start/start_test.go +++ b/internal/db/start/start_test.go @@ -259,7 +259,7 @@ func TestSetupDatabase(t *testing.T) { Query(roles). Reply("CREATE ROLE") // Run test - err := setupDatabase(context.Background(), fsys, io.Discard, conn.Intercept) + err := SetupLocalDatabase(context.Background(), "", fsys, io.Discard, conn.Intercept) // Check error assert.NoError(t, err) assert.Empty(t, apitest.ListUnmatchedRequests()) @@ -268,7 +268,7 @@ func TestSetupDatabase(t *testing.T) { t.Run("throws error on connect failure", func(t *testing.T) { utils.Config.Db.Port = 0 // Run test - err := setupDatabase(context.Background(), nil, io.Discard) + err := SetupLocalDatabase(context.Background(), "", nil, io.Discard) // Check error assert.ErrorContains(t, err, "invalid port (outside range)") }) @@ -285,7 +285,7 @@ func TestSetupDatabase(t *testing.T) { conn := pgtest.NewConn() defer conn.Close(t) // Run test - err := setupDatabase(context.Background(), nil, io.Discard, conn.Intercept) + err := SetupLocalDatabase(context.Background(), "", nil, io.Discard, conn.Intercept) // Check error assert.ErrorContains(t, err, "network error") assert.Empty(t, apitest.ListUnmatchedRequests()) @@ -308,7 +308,7 @@ func TestSetupDatabase(t *testing.T) { conn := pgtest.NewConn() defer conn.Close(t) // Run test - err := setupDatabase(context.Background(), fsys, io.Discard, conn.Intercept) + err := SetupLocalDatabase(context.Background(), "", fsys, io.Discard, conn.Intercept) // Check error assert.ErrorIs(t, err, os.ErrPermission) assert.Empty(t, apitest.ListUnmatchedRequests()) diff --git a/internal/migration/apply/apply.go b/internal/migration/apply/apply.go index f8c466259..50d65d356 100644 --- a/internal/migration/apply/apply.go +++ b/internal/migration/apply/apply.go @@ -20,13 +20,13 @@ func MigrateAndSeed(ctx context.Context, version string, conn *pgx.Conn, fsys af if err := migration.ApplyMigrations(ctx, migrations, conn, afero.NewIOFS(fsys)); err != nil { return err } - if !utils.Config.Db.Seed.Enabled { - return nil - } return SeedDatabase(ctx, conn, fsys) } func SeedDatabase(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { + if !utils.Config.Db.Seed.Enabled { + return nil + } seeds, err := migration.GetPendingSeeds(ctx, utils.Config.Db.Seed.SqlPaths, conn, afero.NewIOFS(fsys)) if err != nil { return err From dffb8d6ec6cf129c5c261e05d1ccbbdc751e90ec Mon Sep 17 00:00:00 2001 From: Qiao Han Date: Sat, 5 Oct 2024 00:19:10 +0800 Subject: [PATCH 25/28] chore: fix unit tests --- internal/db/push/push_test.go | 15 +++++++++++---- internal/db/start/start_test.go | 10 ++-------- internal/link/link_test.go | 3 ++- internal/migration/apply/apply.go | 4 ++-- internal/testing/helper/history.go | 4 +++- pkg/migration/apply_test.go | 7 +++++-- pkg/migration/seed.go | 7 +++++-- pkg/migration/seed_test.go | 4 ++-- 8 files changed, 32 insertions(+), 22 deletions(-) diff --git a/internal/db/push/push_test.go b/internal/db/push/push_test.go index 1616216d9..d7de969af 100644 --- 
a/internal/db/push/push_test.go +++ b/internal/db/push/push_test.go @@ -2,6 +2,8 @@ package push import ( "context" + "crypto/sha256" + "encoding/hex" "os" "path/filepath" "testing" @@ -161,24 +163,29 @@ func TestPushAll(t *testing.T) { }) t.Run("throws error on seed failure", func(t *testing.T) { + digest := hex.EncodeToString(sha256.New().Sum(nil)) seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") utils.Config.Db.Seed.SqlPaths = []string{seedPath} // Setup in-memory fs - fsys := &fstest.OpenErrorFs{DenyPath: seedPath} - _, _ = fsys.Create(seedPath) + fsys := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fsys, seedPath, []byte{}, 0644)) path := filepath.Join(utils.MigrationsDir, "0_test.sql") require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644)) // Setup mock postgres conn := pgtest.NewConn() defer conn.Close(t) conn.Query(migration.LIST_MIGRATION_VERSION). + Reply("SELECT 0"). + Query(migration.SELECT_SEED_TABLE). Reply("SELECT 0") helper.MockMigrationHistory(conn). Query(migration.INSERT_MIGRATION_VERSION, "0", "test", nil). - Reply("INSERT 0 1") + Reply("INSERT 0 1"). + Query(migration.UPSERT_SEED_FILE, seedPath, digest). + ReplyError(pgerrcode.NotNullViolation, `null value in column "hash" of relation "seed_files"`) // Run test err := Run(context.Background(), false, false, false, true, dbConfig, fsys, conn.Intercept) // Check error - assert.ErrorIs(t, err, os.ErrPermission) + assert.ErrorContains(t, err, `ERROR: null value in column "hash" of relation "seed_files" (SQLSTATE 23502)`) }) } diff --git a/internal/db/start/start_test.go b/internal/db/start/start_test.go index 24ba7f0ee..96d97da8c 100644 --- a/internal/db/start/start_test.go +++ b/internal/db/start/start_test.go @@ -6,7 +6,6 @@ import ( "io" "net/http" "os" - "path/filepath" "testing" "github.com/docker/docker/api/types" @@ -52,8 +51,6 @@ func TestInitBranch(t *testing.T) { func TestStartDatabase(t *testing.T) { t.Run("initialize main branch", func(t *testing.T) { - seedPath := filepath.Join(utils.SupabaseDirPath, "seed.sql") - utils.Config.Db.Seed.SqlPaths = []string{seedPath} utils.Config.Db.MajorVersion = 15 utils.DbId = "supabase_db_test" utils.ConfigId = "supabase_config_test" @@ -62,8 +59,6 @@ func TestStartDatabase(t *testing.T) { fsys := afero.NewMemMapFs() roles := "create role test" require.NoError(t, afero.WriteFile(fsys, utils.CustomRolesPath, []byte(roles), 0644)) - seed := "INSERT INTO employees(name) VALUES ('Alice')" - require.NoError(t, afero.WriteFile(fsys, seedPath, []byte(seed), 0644)) // Setup mock docker require.NoError(t, apitest.MockDocker(utils.Docker)) defer gock.OffAll() @@ -91,9 +86,7 @@ func TestStartDatabase(t *testing.T) { conn := pgtest.NewConn() defer conn.Close(t) conn.Query(roles). - Reply("CREATE ROLE"). - Query(seed). - Reply("INSERT 0 1") + Reply("CREATE ROLE") // Run test err := StartDatabase(context.Background(), fsys, io.Discard, conn.Intercept) // Check error @@ -274,6 +267,7 @@ func TestSetupDatabase(t *testing.T) { }) t.Run("throws error on init failure", func(t *testing.T) { + utils.Config.Realtime.Enabled = true utils.Config.Db.Port = 5432 // Setup mock docker require.NoError(t, apitest.MockDocker(utils.Docker)) diff --git a/internal/link/link_test.go b/internal/link/link_test.go index f33961293..af17eb9f5 100644 --- a/internal/link/link_test.go +++ b/internal/link/link_test.go @@ -313,7 +313,8 @@ func TestLinkDatabase(t *testing.T) { Query(migration.CREATE_VERSION_TABLE). 
ReplyError(pgerrcode.InsufficientPrivilege, "permission denied for relation supabase_migrations"). Query(migration.ADD_STATEMENTS_COLUMN). - Query(migration.ADD_NAME_COLUMN) + Query(migration.ADD_NAME_COLUMN). + Query(migration.CREATE_SEED_TABLE) // Run test err := linkDatabase(context.Background(), dbConfig, conn.Intercept) // Check error diff --git a/internal/migration/apply/apply.go b/internal/migration/apply/apply.go index 50d65d356..698a3ae53 100644 --- a/internal/migration/apply/apply.go +++ b/internal/migration/apply/apply.go @@ -20,10 +20,10 @@ func MigrateAndSeed(ctx context.Context, version string, conn *pgx.Conn, fsys af if err := migration.ApplyMigrations(ctx, migrations, conn, afero.NewIOFS(fsys)); err != nil { return err } - return SeedDatabase(ctx, conn, fsys) + return applySeedFiles(ctx, conn, fsys) } -func SeedDatabase(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { +func applySeedFiles(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error { if !utils.Config.Db.Seed.Enabled { return nil } diff --git a/internal/testing/helper/history.go b/internal/testing/helper/history.go index b73d267e8..30f4a0ca2 100644 --- a/internal/testing/helper/history.go +++ b/internal/testing/helper/history.go @@ -14,6 +14,8 @@ func MockMigrationHistory(conn *pgtest.MockConn) *pgtest.MockConn { Query(migration.ADD_STATEMENTS_COLUMN). Reply("ALTER TABLE"). Query(migration.ADD_NAME_COLUMN). - Reply("ALTER TABLE") + Reply("ALTER TABLE"). + Query(migration.CREATE_SEED_TABLE). + Reply("CREATE TABLE") return conn } diff --git a/pkg/migration/apply_test.go b/pkg/migration/apply_test.go index 65a75028a..7415dfce2 100644 --- a/pkg/migration/apply_test.go +++ b/pkg/migration/apply_test.go @@ -130,7 +130,8 @@ func TestApplyMigrations(t *testing.T) { Query(CREATE_VERSION_TABLE). ReplyError(pgerrcode.InsufficientPrivilege, "permission denied for relation supabase_migrations"). Query(ADD_STATEMENTS_COLUMN). - Query(ADD_NAME_COLUMN) + Query(ADD_NAME_COLUMN). + Query(CREATE_SEED_TABLE) // Run test err := ApplyMigrations(context.Background(), pending, conn.MockClient(t), fsys) // Check error @@ -175,6 +176,8 @@ func mockMigrationHistory(conn *pgtest.MockConn) *pgtest.MockConn { Query(ADD_STATEMENTS_COLUMN). Reply("ALTER TABLE"). Query(ADD_NAME_COLUMN). - Reply("ALTER TABLE") + Reply("ALTER TABLE"). + Query(CREATE_SEED_TABLE). 
+ Reply("CREATE TABLE") return conn } diff --git a/pkg/migration/seed.go b/pkg/migration/seed.go index 6fd5aa43c..66dd876f6 100644 --- a/pkg/migration/seed.go +++ b/pkg/migration/seed.go @@ -19,9 +19,9 @@ func getRemoteSeeds(ctx context.Context, conn *pgx.Conn) (map[string]string, err var pgErr *pgconn.PgError if errors.As(err, &pgErr) && pgErr.Code == pgerrcode.UndefinedTable { // If seed table is undefined, the remote project has no migrations - return map[string]string{}, nil + return nil, nil } - return map[string]string{}, err + return nil, err } applied := make(map[string]string, len(remotes)) for _, seed := range remotes { @@ -31,6 +31,9 @@ func getRemoteSeeds(ctx context.Context, conn *pgx.Conn) (map[string]string, err } func GetPendingSeeds(ctx context.Context, locals []string, conn *pgx.Conn, fsys fs.FS) ([]SeedFile, error) { + if len(locals) == 0 { + return nil, nil + } applied, err := getRemoteSeeds(ctx, conn) if err != nil { return nil, err diff --git a/pkg/migration/seed_test.go b/pkg/migration/seed_test.go index db1865ace..f3ba81650 100644 --- a/pkg/migration/seed_test.go +++ b/pkg/migration/seed_test.go @@ -65,7 +65,7 @@ func TestPendingSeeds(t *testing.T) { require.Empty(t, seeds) }) - t.Run("throws error on missing seed table", func(t *testing.T) { + t.Run("ignores missing seed table", func(t *testing.T) { // Setup mock postgres conn := pgtest.NewConn() defer conn.Close(t) @@ -74,7 +74,7 @@ func TestPendingSeeds(t *testing.T) { // Run test _, err := GetPendingSeeds(context.Background(), pending, conn.MockClient(t), testMigrations) // Check error - assert.ErrorContains(t, err, `ERROR: relation "seed_files" does not exist (SQLSTATE 42P01)`) + assert.NoError(t, err) }) t.Run("throws error on missing file", func(t *testing.T) { From b3efd4872ff98d677ed084f85d8d815ef376cb97 Mon Sep 17 00:00:00 2001 From: Qiao Han Date: Sun, 6 Oct 2024 14:03:39 +0800 Subject: [PATCH 26/28] chore: prompt before pushing seed files --- internal/db/push/push.go | 65 +++++++++++++++++++++---------- internal/db/push/push_test.go | 2 +- internal/db/start/start.go | 6 ++- internal/migration/apply/apply.go | 10 ----- internal/utils/flags/db_url.go | 5 +++ pkg/migration/seed.go | 2 +- 6 files changed, 57 insertions(+), 33 deletions(-) diff --git a/internal/db/push/push.go b/internal/db/push/push.go index 226a76751..f52d78974 100644 --- a/internal/db/push/push.go +++ b/internal/db/push/push.go @@ -10,7 +10,6 @@ import ( "github.com/jackc/pgconn" "github.com/jackc/pgx/v4" "github.com/spf13/afero" - "github.com/supabase/cli/internal/migration/apply" "github.com/supabase/cli/internal/migration/up" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/pkg/migration" @@ -36,42 +35,68 @@ func Run(ctx context.Context, dryRun, ignoreVersionMismatch bool, includeRoles, return err } } - if len(pending) == 0 && len(seeds) == 0 { + var globals []string + if includeRoles { + if exists, err := afero.Exists(fsys, utils.CustomRolesPath); err != nil { + return errors.Errorf("failed to find custom roles: %w", err) + } else if exists { + globals = append(globals, utils.CustomRolesPath) + } + } + if len(pending) == 0 && len(seeds) == 0 && len(globals) == 0 { fmt.Println("Remote database is up to date.") return nil } // Push pending migrations if dryRun { - if includeRoles { - fmt.Fprintln(os.Stderr, "Would create custom roles "+utils.Bold(utils.CustomRolesPath)+"...") + if len(globals) > 0 { + fmt.Fprintln(os.Stderr, "Would create custom roles "+utils.Bold(globals[0])+"...") } if len(pending) > 0 { 
 			fmt.Fprintln(os.Stderr, "Would push these migrations:")
-			fmt.Fprint(os.Stderr, utils.Bold(confirmPushAll(pending)))
+			fmt.Fprint(os.Stderr, confirmPushAll(pending))
 		}
-		if includeSeed && len(seeds) > 0 {
+		if len(seeds) > 0 {
 			fmt.Fprintln(os.Stderr, "Would seed these files:")
-			fmt.Fprint(os.Stderr, utils.Bold(confirmSeedAll(seeds)))
+			fmt.Fprint(os.Stderr, confirmSeedAll(seeds))
 		}
 	} else {
-		msg := fmt.Sprintf("Do you want to push these migrations to the remote database?\n%s\n", confirmPushAll(pending))
-		if shouldPush, err := utils.NewConsole().PromptYesNo(ctx, msg, true); err != nil {
-			return err
-		} else if !shouldPush {
-			return errors.New(context.Canceled)
-		}
-		if includeRoles {
-			if err := apply.CreateCustomRoles(ctx, conn, fsys); err != nil {
+		if len(globals) > 0 {
+			msg := "Do you want to create custom roles in the database cluster?"
+			if shouldPush, err := utils.NewConsole().PromptYesNo(ctx, msg, true); err != nil {
+				return err
+			} else if !shouldPush {
+				return errors.New(context.Canceled)
+			}
+			if err := migration.SeedGlobals(ctx, globals, conn, afero.NewIOFS(fsys)); err != nil {
 				return err
 			}
 		}
-		if err := migration.ApplyMigrations(ctx, pending, conn, afero.NewIOFS(fsys)); err != nil {
-			return err
+		if len(pending) > 0 {
+			msg := fmt.Sprintf("Do you want to push these migrations to the remote database?\n%s\n", confirmPushAll(pending))
+			if shouldPush, err := utils.NewConsole().PromptYesNo(ctx, msg, true); err != nil {
+				return err
+			} else if !shouldPush {
+				return errors.New(context.Canceled)
+			}
+			if err := migration.ApplyMigrations(ctx, pending, conn, afero.NewIOFS(fsys)); err != nil {
+				return err
+			}
+		} else {
+			fmt.Fprintln(os.Stderr, "Schema migrations are up to date.")
 		}
-		if includeSeed {
+		if len(seeds) > 0 {
+			msg := fmt.Sprintf("Do you want to seed the remote database with these files?\n%s\n", confirmSeedAll(seeds))
+			if shouldPush, err := utils.NewConsole().PromptYesNo(ctx, msg, true); err != nil {
+				return err
+			} else if !shouldPush {
+				return errors.New(context.Canceled)
+			}
 			if err := migration.SeedData(ctx, seeds, conn, afero.NewIOFS(fsys)); err != nil {
 				return err
 			}
+		} else if includeSeed {
+			fmt.Fprintln(os.Stderr, "Seed files are up to date.")
 		}
 	}
 	fmt.Println("Finished " + utils.Aqua("supabase db push") + ".")
@@ -81,7 +106,7 @@ func Run(ctx context.Context, dryRun, ignoreVersionMismatch bool, includeRoles,
 func confirmPushAll(pending []string) (msg string) {
 	for _, path := range pending {
 		filename := filepath.Base(path)
-		msg += fmt.Sprintf(" • %s\n", filename)
+		msg += fmt.Sprintf(" • %s\n", utils.Bold(filename))
 	}
 	return msg
 }
@@ -92,7 +117,7 @@ func confirmSeedAll(pending []migration.SeedFile) (msg string) {
 		if seed.Dirty {
 			notice += " (hash update)"
 		}
-		msg += fmt.Sprintf(" • %s\n", notice)
+		msg += fmt.Sprintf(" • %s\n", utils.Bold(notice))
 	}
 	return msg
 }
diff --git a/internal/db/push/push_test.go b/internal/db/push/push_test.go
index d7de969af..631c2cc7e 100644
--- a/internal/db/push/push_test.go
+++ b/internal/db/push/push_test.go
@@ -148,7 +148,7 @@ func TestPushAll(t *testing.T) {
 
 	t.Run("throws error on roles failure", func(t *testing.T) {
 		// Setup in-memory fs
-		fsys := &fstest.OpenErrorFs{DenyPath: utils.CustomRolesPath}
+		fsys := &fstest.StatErrorFs{DenyPath: utils.CustomRolesPath}
 		path := filepath.Join(utils.MigrationsDir, "0_test.sql")
 		require.NoError(t, afero.WriteFile(fsys, path, []byte{}, 0644))
 		// Setup mock postgres
diff --git a/internal/db/start/start.go b/internal/db/start/start.go
index 5f2364e5d..822e9aff6 100644
--- a/internal/db/start/start.go
+++ b/internal/db/start/start.go
@@ -324,5 +324,9 @@ func SetupDatabase(ctx context.Context, conn *pgx.Conn, host string, w io.Writer
 	if err := initSchema(ctx, conn, host, w); err != nil {
 		return err
 	}
-	return apply.CreateCustomRoles(ctx, conn, fsys)
+	err := migration.SeedGlobals(ctx, []string{utils.CustomRolesPath}, conn, afero.NewIOFS(fsys))
+	if errors.Is(err, os.ErrNotExist) {
+		return nil
+	}
+	return err
 }
diff --git a/internal/migration/apply/apply.go b/internal/migration/apply/apply.go
index 698a3ae53..224b342f7 100644
--- a/internal/migration/apply/apply.go
+++ b/internal/migration/apply/apply.go
@@ -2,9 +2,7 @@ package apply
 
 import (
 	"context"
-	"os"
 
-	"github.com/go-errors/errors"
 	"github.com/jackc/pgx/v4"
 	"github.com/spf13/afero"
 	"github.com/supabase/cli/internal/migration/list"
@@ -33,11 +31,3 @@ func applySeedFiles(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error {
 	}
 	return migration.SeedData(ctx, seeds, conn, afero.NewIOFS(fsys))
 }
-
-func CreateCustomRoles(ctx context.Context, conn *pgx.Conn, fsys afero.Fs) error {
-	err := migration.SeedGlobals(ctx, []string{utils.CustomRolesPath}, conn, afero.NewIOFS(fsys))
-	if errors.Is(err, os.ErrNotExist) {
-		return nil
-	}
-	return err
-}
diff --git a/internal/utils/flags/db_url.go b/internal/utils/flags/db_url.go
index 6fc912179..d089f2d13 100644
--- a/internal/utils/flags/db_url.go
+++ b/internal/utils/flags/db_url.go
@@ -50,6 +50,11 @@ func ParseDatabaseConfig(flagSet *pflag.FlagSet, fsys afero.Fs) error {
 	// Update connection config
 	switch connType {
 	case direct:
+		if err := utils.Config.Load("", utils.NewRootFS(fsys)); err != nil {
+			if !errors.Is(err, os.ErrNotExist) {
+				return err
+			}
+		}
 		if flag := flagSet.Lookup("db-url"); flag != nil {
 			config, err := pgconn.ParseConfig(flag.Value.String())
 			if err != nil {
diff --git a/pkg/migration/seed.go b/pkg/migration/seed.go
index 66dd876f6..44ac67ae0 100644
--- a/pkg/migration/seed.go
+++ b/pkg/migration/seed.go
@@ -60,7 +60,7 @@ func GetPendingSeeds(ctx context.Context, locals []string, conn *pgx.Conn, fsys
 func SeedData(ctx context.Context, pending []SeedFile, conn *pgx.Conn, fsys fs.FS) error {
 	for _, seed := range pending {
 		if seed.Dirty {
-			fmt.Fprintf(os.Stderr, "Updating seed file hash %s...\n", seed.Path)
+			fmt.Fprintf(os.Stderr, "Updating seed hash to %s...\n", seed.Path)
 		} else {
 			fmt.Fprintf(os.Stderr, "Seeding data from %s...\n", seed.Path)
 		}

From c805fc64e1ebc88d375541c224400cebc5d60619 Mon Sep 17 00:00:00 2001
From: Qiao Han
Date: Mon, 7 Oct 2024 13:08:25 +0800
Subject: [PATCH 27/28] chore: create seed table separately

---
 internal/db/push/push_test.go      |  3 ++-
 internal/link/link.go              |  5 ++++-
 internal/link/link_test.go         |  6 ++++--
 internal/testing/helper/history.go |  9 ++++++++-
 pkg/migration/apply_test.go        |  7 ++-----
 pkg/migration/history.go           | 14 +++++++++++++-
 pkg/migration/seed.go              |  5 +++++
 pkg/migration/seed_test.go         | 15 +++++++++++++--
 8 files changed, 51 insertions(+), 13 deletions(-)

diff --git a/internal/db/push/push_test.go b/internal/db/push/push_test.go
index 631c2cc7e..3a3ff3cda 100644
--- a/internal/db/push/push_test.go
+++ b/internal/db/push/push_test.go
@@ -180,7 +180,8 @@ func TestPushAll(t *testing.T) {
 			Reply("SELECT 0")
 		helper.MockMigrationHistory(conn).
 			Query(migration.INSERT_MIGRATION_VERSION, "0", "test", nil).
-			Reply("INSERT 0 1").
+			Reply("INSERT 0 1")
+		helper.MockSeedHistory(conn).
 			Query(migration.UPSERT_SEED_FILE, seedPath, digest).
 			ReplyError(pgerrcode.NotNullViolation, `null value in column "hash" of relation "seed_files"`)
 		// Run test
diff --git a/internal/link/link.go b/internal/link/link.go
index 3a2831070..1d3824b84 100644
--- a/internal/link/link.go
+++ b/internal/link/link.go
@@ -183,7 +183,10 @@ func linkDatabase(ctx context.Context, config pgconn.Config, options ...func(*pg
 	defer conn.Close(context.Background())
 	updatePostgresConfig(conn)
 	// If `schema_migrations` doesn't exist on the remote database, create it.
-	return migration.CreateMigrationTable(ctx, conn)
+	if err := migration.CreateMigrationTable(ctx, conn); err != nil {
+		return err
+	}
+	return migration.CreateSeedTable(ctx, conn)
 }
 
 func linkDatabaseVersion(ctx context.Context, projectRef string, fsys afero.Fs) error {
diff --git a/internal/link/link_test.go b/internal/link/link_test.go
index af17eb9f5..edc95d8fa 100644
--- a/internal/link/link_test.go
+++ b/internal/link/link_test.go
@@ -46,6 +46,7 @@ func TestLinkCommand(t *testing.T) {
 		conn := pgtest.NewConn()
 		defer conn.Close(t)
 		helper.MockMigrationHistory(conn)
+		helper.MockSeedHistory(conn)
 		// Flush pending mocks after test execution
 		defer gock.OffAll()
 		gock.New(utils.DefaultApiHost).
@@ -279,6 +280,7 @@ func TestLinkDatabase(t *testing.T) {
 		})
 		defer conn.Close(t)
 		helper.MockMigrationHistory(conn)
+		helper.MockSeedHistory(conn)
 		// Run test
 		err := linkDatabase(context.Background(), dbConfig, conn.Intercept)
 		// Check error
@@ -294,6 +296,7 @@ func TestLinkDatabase(t *testing.T) {
 		})
 		defer conn.Close(t)
 		helper.MockMigrationHistory(conn)
+		helper.MockSeedHistory(conn)
 		// Run test
 		err := linkDatabase(context.Background(), dbConfig, conn.Intercept)
 		// Check error
@@ -313,8 +316,7 @@ func TestLinkDatabase(t *testing.T) {
 			Query(migration.CREATE_VERSION_TABLE).
 			ReplyError(pgerrcode.InsufficientPrivilege, "permission denied for relation supabase_migrations").
 			Query(migration.ADD_STATEMENTS_COLUMN).
-			Query(migration.ADD_NAME_COLUMN).
-			Query(migration.CREATE_SEED_TABLE)
+			Query(migration.ADD_NAME_COLUMN)
 		// Run test
 		err := linkDatabase(context.Background(), dbConfig, conn.Intercept)
 		// Check error
diff --git a/internal/testing/helper/history.go b/internal/testing/helper/history.go
index 30f4a0ca2..95c846b7a 100644
--- a/internal/testing/helper/history.go
+++ b/internal/testing/helper/history.go
@@ -14,7 +14,14 @@ func MockMigrationHistory(conn *pgtest.MockConn) *pgtest.MockConn {
 		Query(migration.ADD_STATEMENTS_COLUMN).
 		Reply("ALTER TABLE").
 		Query(migration.ADD_NAME_COLUMN).
-		Reply("ALTER TABLE").
+		Reply("ALTER TABLE")
+	return conn
+}
+
+func MockSeedHistory(conn *pgtest.MockConn) *pgtest.MockConn {
+	conn.Query(migration.SET_LOCK_TIMEOUT).
+		Query(migration.CREATE_VERSION_SCHEMA).
+		Reply("CREATE SCHEMA").
+		Query(migration.CREATE_SEED_TABLE).
+		Reply("CREATE TABLE")
 	return conn
 }
diff --git a/pkg/migration/apply_test.go b/pkg/migration/apply_test.go
index 7415dfce2..65a75028a 100644
--- a/pkg/migration/apply_test.go
+++ b/pkg/migration/apply_test.go
@@ -130,8 +130,7 @@ func TestApplyMigrations(t *testing.T) {
 			Query(CREATE_VERSION_TABLE).
 			ReplyError(pgerrcode.InsufficientPrivilege, "permission denied for relation supabase_migrations").
 			Query(ADD_STATEMENTS_COLUMN).
-			Query(ADD_NAME_COLUMN).
-			Query(CREATE_SEED_TABLE)
+			Query(ADD_NAME_COLUMN)
 		// Run test
 		err := ApplyMigrations(context.Background(), pending, conn.MockClient(t), fsys)
 		// Check error
@@ -175,8 +175,6 @@ func mockMigrationHistory(conn *pgtest.MockConn) *pgtest.MockConn {
 		Query(ADD_STATEMENTS_COLUMN).
 		Reply("ALTER TABLE").
 		Query(ADD_NAME_COLUMN).
-		Reply("ALTER TABLE").
-		Query(CREATE_SEED_TABLE).
-		Reply("CREATE TABLE")
+		Reply("ALTER TABLE")
 	return conn
 }
diff --git a/pkg/migration/history.go b/pkg/migration/history.go
index 832651874..5ff77ae6b 100644
--- a/pkg/migration/history.go
+++ b/pkg/migration/history.go
@@ -36,7 +36,6 @@ func CreateMigrationTable(ctx context.Context, conn *pgx.Conn) error {
 	batch.ExecParams(CREATE_VERSION_TABLE, nil, nil, nil, nil)
 	batch.ExecParams(ADD_STATEMENTS_COLUMN, nil, nil, nil, nil)
 	batch.ExecParams(ADD_NAME_COLUMN, nil, nil, nil, nil)
-	batch.ExecParams(CREATE_SEED_TABLE, nil, nil, nil, nil)
 	if _, err := conn.PgConn().ExecBatch(ctx, &batch).ReadAll(); err != nil {
 		return errors.Errorf("failed to create migration table: %w", err)
 	}
@@ -51,6 +50,19 @@ func ReadMigrationTable(ctx context.Context, conn *pgx.Conn) ([]MigrationFile, e
 	return pgxv5.CollectRows[MigrationFile](rows)
 }
 
+func CreateSeedTable(ctx context.Context, conn *pgx.Conn) error {
+	// This must be run without prepared statements because each statement in the batch depends on
+	// the previous schema change. The lock timeout will be reset when implicit transaction ends.
+	batch := pgconn.Batch{}
+	batch.ExecParams(SET_LOCK_TIMEOUT, nil, nil, nil, nil)
+	batch.ExecParams(CREATE_VERSION_SCHEMA, nil, nil, nil, nil)
+	batch.ExecParams(CREATE_SEED_TABLE, nil, nil, nil, nil)
+	if _, err := conn.PgConn().ExecBatch(ctx, &batch).ReadAll(); err != nil {
+		return errors.Errorf("failed to create seed table: %w", err)
+	}
+	return nil
+}
+
 func ReadSeedTable(ctx context.Context, conn *pgx.Conn) ([]SeedFile, error) {
 	rows, err := conn.Query(ctx, SELECT_SEED_TABLE)
 	if err != nil {
diff --git a/pkg/migration/seed.go b/pkg/migration/seed.go
index 44ac67ae0..988e8d4bc 100644
--- a/pkg/migration/seed.go
+++ b/pkg/migration/seed.go
@@ -58,6 +58,11 @@ func GetPendingSeeds(ctx context.Context, locals []string, conn *pgx.Conn, fsys
 }
 
 func SeedData(ctx context.Context, pending []SeedFile, conn *pgx.Conn, fsys fs.FS) error {
+	if len(pending) > 0 {
+		if err := CreateSeedTable(ctx, conn); err != nil {
+			return err
+		}
+	}
 	for _, seed := range pending {
 		if seed.Dirty {
 			fmt.Fprintf(os.Stderr, "Updating seed hash to %s...\n", seed.Path)
diff --git a/pkg/migration/seed_test.go b/pkg/migration/seed_test.go
index f3ba81650..7e9cd4aca 100644
--- a/pkg/migration/seed_test.go
+++ b/pkg/migration/seed_test.go
@@ -102,7 +102,8 @@ func TestSeedData(t *testing.T) {
 		// Setup mock postgres
 		conn := pgtest.NewConn()
 		defer conn.Close(t)
-		conn.Query(UPSERT_SEED_FILE, seed.Path, seed.Hash).
+		mockSeedHistory(conn).
+			Query(UPSERT_SEED_FILE, seed.Path, seed.Hash).
 			Reply("INSERT 0 1")
 		// Run test
 		err := SeedData(context.Background(), []SeedFile{seed}, conn.MockClient(t), testMigrations)
@@ -118,7 +119,8 @@ func TestSeedData(t *testing.T) {
 		// Setup mock postgres
 		conn := pgtest.NewConn()
 		defer conn.Close(t)
-		conn.Query(testSeed+`;INSERT INTO supabase_migrations.seed_files(path, hash) VALUES( 'testdata/seed.sql' , '61868484fc0ddca2a2022217629a9fd9a4cf1ca479432046290797d6d40ffcc3' ) ON CONFLICT (path) DO UPDATE SET hash = EXCLUDED.hash`).
+		mockSeedHistory(conn).
+			Query(testSeed+`;INSERT INTO supabase_migrations.seed_files(path, hash) VALUES( 'testdata/seed.sql' , '61868484fc0ddca2a2022217629a9fd9a4cf1ca479432046290797d6d40ffcc3' ) ON CONFLICT (path) DO UPDATE SET hash = EXCLUDED.hash`).
 			ReplyError(pgerrcode.NotNullViolation, `null value in column "age" of relation "employees"`)
 		// Run test
 		err := SeedData(context.Background(), []SeedFile{seed}, conn.MockClient(t, func(cc *pgx.ConnConfig) {
@@ -129,6 +131,15 @@ func TestSeedData(t *testing.T) {
 	})
 }
 
+func mockSeedHistory(conn *pgtest.MockConn) *pgtest.MockConn {
+	conn.Query(SET_LOCK_TIMEOUT).
+		Query(CREATE_VERSION_SCHEMA).
+		Reply("CREATE SCHEMA").
+		Query(CREATE_SEED_TABLE).
+		Reply("CREATE TABLE")
+	return conn
+}
+
 //go:embed testdata/1_globals.sql
 var testGlobals string

From e642265f0d1a27a776538c520cc80de91736b25d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 7 Oct 2024 16:31:19 +0800
Subject: [PATCH 28/28] chore(deps): bump bin-links from 4.0.4 to 5.0.0 (#2712)

Bumps [bin-links](https://github.com/npm/bin-links) from 4.0.4 to 5.0.0.
- [Release notes](https://github.com/npm/bin-links/releases)
- [Changelog](https://github.com/npm/bin-links/blob/main/CHANGELOG.md)
- [Commits](https://github.com/npm/bin-links/compare/v4.0.4...v5.0.0)

---
updated-dependencies:
- dependency-name: bin-links
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index 54a6ae29f..dd3014d22 100644
--- a/package.json
+++ b/package.json
@@ -21,7 +21,7 @@
     "supabase": "bin/supabase"
   },
   "dependencies": {
-    "bin-links": "^4.0.3",
+    "bin-links": "^5.0.0",
     "https-proxy-agent": "^7.0.2",
     "node-fetch": "^3.3.2",
     "tar": "7.4.3"