From f0bd397dcd8378aabdaa97b052c8cbf4256bf7e8 Mon Sep 17 00:00:00 2001 From: Quang-Minh Nguyen Date: Thu, 16 Oct 2025 15:06:42 +0700 Subject: [PATCH 1/5] raft: Add gossip configuration foundation with smart defaults Gitaly Raft currently lacks cluster-wide routing table discovery. Each Raft group maintains its routing table in isolation, preventing nodes from discovering partitions managed by other Raft groups. This limitation hinders cluster-wide partition routing and cross-group coordination. This commit implements the configuration foundation for gossip-based routing table propagation using HashiCorp memberlist protocol. The implementation provides smart defaults for all configuration fields, enabling zero-config operation while allowing advanced overrides when needed. The gossip configuration includes bind address/port, advertise address/port, and seed node list. All fields are optional with automatic defaults: 0.0.0.0 bind address, port 7946 (standard memberlist port), and auto-detected advertise address from hostname with 127.0.0.1 fallback. The configuration integrates into the Raft struct as an optional pointer field, remaining nil when not specified. Comprehensive validation ensures ports are in valid range (1-65535) and seed addresses follow host:port format. The configuration is entirely optional. When specified with partial values, the DefaultConfig() function fills in missing fields automatically. This approach balances ease of use with flexibility for complex network topologies. 
Dependencies: - Add github.com/hashicorp/memberlist v0.5.1 for gossip protocol --- go.mod | 4 +- go.sum | 12 +- internal/gitaly/config/config.go | 9 + internal/gitaly/config/config_test.go | 93 ++++++ internal/gitaly/storage/gossip/config.go | 193 +++++++++++ internal/gitaly/storage/gossip/config_test.go | 303 ++++++++++++++++++ .../gitaly/storage/gossip/testhelper_test.go | 11 + internal/gitaly/storage/gossip/types.go | 37 +++ 8 files changed, 657 insertions(+), 5 deletions(-) create mode 100644 internal/gitaly/storage/gossip/config.go create mode 100644 internal/gitaly/storage/gossip/config_test.go create mode 100644 internal/gitaly/storage/gossip/testhelper_test.go create mode 100644 internal/gitaly/storage/gossip/types.go diff --git a/go.mod b/go.mod index ff74ff321b9..077f89c6101 100644 --- a/go.mod +++ b/go.mod @@ -181,7 +181,7 @@ require ( github.com/olekukonko/errors v0.0.0-20250405072817-4e6d85265da6 // indirect github.com/olekukonko/ll v0.0.8 // indirect github.com/olekukonko/ts v0.0.0-20171002115256-78ecb04241c0 // indirect - github.com/philhofer/fwd v1.1.1 // indirect + github.com/philhofer/fwd v1.1.2 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect @@ -199,7 +199,7 @@ require ( github.com/shogo82148/go-shuffle v1.0.1 // indirect github.com/skeema/knownhosts v1.3.0 // indirect github.com/ssgelm/cookiejarparser v1.0.1 // indirect - github.com/tinylib/msgp v1.1.2 // indirect + github.com/tinylib/msgp v1.1.8 // indirect github.com/tklauser/go-sysconf v0.3.9 // indirect github.com/tklauser/numcpus v0.3.0 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect diff --git a/go.sum b/go.sum index c6ea71dbae6..d30c6249168 100644 --- a/go.sum +++ b/go.sum @@ -554,8 +554,8 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYr github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod 
h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= -github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= @@ -636,8 +636,9 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= +github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= @@ -770,6 +771,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= @@ -815,6 +817,7 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= @@ -926,6 +929,7 @@ golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= @@ -943,6 +947,7 @@ golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= @@ -1013,6 +1018,7 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= diff --git a/internal/gitaly/config/config.go b/internal/gitaly/config/config.go index 3b0c90ccc05..c45fc5fd518 100644 --- a/internal/gitaly/config/config.go +++ b/internal/gitaly/config/config.go @@ -24,6 +24,7 @@ import ( "gitlab.com/gitlab-org/gitaly/v18/internal/gitaly/config/cgroups" "gitlab.com/gitlab-org/gitaly/v18/internal/gitaly/config/prometheus" "gitlab.com/gitlab-org/gitaly/v18/internal/gitaly/config/sentry" + "gitlab.com/gitlab-org/gitaly/v18/internal/gitaly/storage/gossip" "gitlab.com/gitlab-org/gitaly/v18/internal/gitaly/storage/mode" "gitlab.com/gitlab-org/gitaly/v18/internal/helper/duration" "gitlab.com/gitlab-org/gitaly/v18/internal/log" @@ -1365,6 +1366,9 @@ type Raft struct { 
ProposalConfChangeTimeout uint64 `json:"proposal_conf_change_timeout" toml:"proposal_conf_change_timeout"` // SnapshotDir is the directory where raft snapshots are stored. SnapshotDir string `json:"snapshot_dir" toml:"snapshot_dir"` // Default: /+gitaly/raft/snapshots + // Gossip contains configuration for the gossip protocol used to propagate routing table updates. + // All fields are optional and will use sensible defaults if not specified. + Gossip *gossip.Config `json:"gossip,omitempty" toml:"gossip,omitempty"` } const ( @@ -1433,6 +1437,11 @@ func (r Raft) Validate(transactions Transactions) error { } } + // Validate gossip configuration if provided + if r.Gossip != nil { + cfgErr = cfgErr.Append(r.Gossip.Validate(), "gossip") + } + return cfgErr.AsError() } diff --git a/internal/gitaly/config/config_test.go b/internal/gitaly/config/config_test.go index ae61e876564..66d9aed8229 100644 --- a/internal/gitaly/config/config_test.go +++ b/internal/gitaly/config/config_test.go @@ -3225,3 +3225,96 @@ func TestLoadDefaults(t *testing.T) { }, }, cfg) } + +func TestLoadRaftWithGossip(t *testing.T) { + t.Parallel() + + t.Run("raft without gossip config", func(t *testing.T) { + cfg, err := Load(strings.NewReader(` +[transactions] +enabled = true + +[raft] +enabled = true +cluster_id = "4f04a0e2-0db8-4bfa-b846-01b5b4a093fb" +snapshot_dir = "` + testhelper.TempDir(t) + `" + `)) + require.NoError(t, err) + + require.True(t, cfg.Raft.Enabled) + require.Nil(t, cfg.Raft.Gossip, "gossip config should be nil when not specified") + }) + + t.Run("raft with minimal gossip config", func(t *testing.T) { + cfg, err := Load(strings.NewReader(` +[transactions] +enabled = true + +[raft] +enabled = true +cluster_id = "4f04a0e2-0db8-4bfa-b846-01b5b4a093fb" +snapshot_dir = "` + testhelper.TempDir(t) + `" + + [raft.gossip] + `)) + require.NoError(t, err) + + require.True(t, cfg.Raft.Enabled) + require.NotNil(t, cfg.Raft.Gossip) + // Fields should be zero values, defaults are applied later + 
require.Equal(t, "", cfg.Raft.Gossip.BindAddr) + require.Equal(t, 0, cfg.Raft.Gossip.BindPort) + }) + + t.Run("raft with full gossip config", func(t *testing.T) { + cfg, err := Load(strings.NewReader(` +[transactions] +enabled = true + +[raft] +enabled = true +cluster_id = "4f04a0e2-0db8-4bfa-b846-01b5b4a093fb" +snapshot_dir = "` + testhelper.TempDir(t) + `" + + [raft.gossip] + bind_addr = "192.168.1.100" + bind_port = 8000 + advertise_addr = "10.0.1.5" + advertise_port = 9000 + seeds = ["node1:7946", "node2:7946"] + `)) + require.NoError(t, err) + + require.True(t, cfg.Raft.Enabled) + require.NotNil(t, cfg.Raft.Gossip) + require.Equal(t, "192.168.1.100", cfg.Raft.Gossip.BindAddr) + require.Equal(t, 8000, cfg.Raft.Gossip.BindPort) + require.Equal(t, "10.0.1.5", cfg.Raft.Gossip.AdvertiseAddr) + require.Equal(t, 9000, cfg.Raft.Gossip.AdvertisePort) + require.Len(t, cfg.Raft.Gossip.Seeds, 2) + require.Contains(t, cfg.Raft.Gossip.Seeds, "node1:7946") + require.Contains(t, cfg.Raft.Gossip.Seeds, "node2:7946") + }) + + t.Run("raft with partial gossip config", func(t *testing.T) { + cfg, err := Load(strings.NewReader(` +[transactions] +enabled = true + +[raft] +enabled = true +cluster_id = "4f04a0e2-0db8-4bfa-b846-01b5b4a093fb" +snapshot_dir = "` + testhelper.TempDir(t) + `" + + [raft.gossip] + advertise_addr = "10.0.1.5" + seeds = ["node1:7946"] + `)) + require.NoError(t, err) + + require.True(t, cfg.Raft.Enabled) + require.NotNil(t, cfg.Raft.Gossip) + require.Equal(t, "10.0.1.5", cfg.Raft.Gossip.AdvertiseAddr) + require.Len(t, cfg.Raft.Gossip.Seeds, 1) + }) +} diff --git a/internal/gitaly/storage/gossip/config.go b/internal/gitaly/storage/gossip/config.go new file mode 100644 index 00000000000..4b0f56b7b15 --- /dev/null +++ b/internal/gitaly/storage/gossip/config.go @@ -0,0 +1,193 @@ +package gossip + +import ( + "fmt" + "net" + "os" + "strconv" + "strings" + + "gitlab.com/gitlab-org/gitaly/v18/internal/errors/cfgerror" +) + +// DefaultConfig returns a Config with 
sensible defaults filled in. +// If the user has provided a partial config, it applies those overrides on top of the defaults. +func DefaultConfig(userConfig *Config) *Config { + cfg := &Config{ + BindAddr: DefaultGossipBindAddr, + BindPort: DefaultGossipPort, + Seeds: []string{}, + } + + // Auto-detect advertise address if not provided + cfg.AdvertiseAddr = detectAdvertiseAddr("") + cfg.AdvertisePort = cfg.BindPort + + // Apply user overrides if provided + if userConfig != nil { + if userConfig.BindAddr != "" { + cfg.BindAddr = userConfig.BindAddr + } + if userConfig.BindPort != 0 { + cfg.BindPort = userConfig.BindPort + // Update AdvertisePort default to match custom BindPort + if userConfig.AdvertisePort == 0 { + cfg.AdvertisePort = userConfig.BindPort + } + } + if userConfig.AdvertiseAddr != "" { + cfg.AdvertiseAddr = userConfig.AdvertiseAddr + } + if userConfig.AdvertisePort != 0 { + cfg.AdvertisePort = userConfig.AdvertisePort + } + if len(userConfig.Seeds) > 0 { + cfg.Seeds = userConfig.Seeds + } + } + + return cfg +} + +// detectAdvertiseAddr attempts to auto-detect a suitable advertise address. +// It tries the following strategies in order: +// 1. Use the provided listenAddr if it's a valid non-0.0.0.0 address +// 2. Use the system hostname +// 3. 
Fall back to 127.0.0.1 +func detectAdvertiseAddr(listenAddr string) string { + // If listenAddr is provided and not 0.0.0.0, use it + if listenAddr != "" && listenAddr != "0.0.0.0" && listenAddr != "[::]" { + // Extract just the host part if it includes a port + host, _, err := net.SplitHostPort(listenAddr) + if err == nil { + return host + } + return listenAddr + } + + // Try to get the hostname + hostname, err := os.Hostname() + if err == nil && hostname != "" { + // Verify the hostname is resolvable + if addrs, err := net.LookupHost(hostname); err == nil && len(addrs) > 0 { + return hostname + } + } + + // Fall back to localhost + return "127.0.0.1" +} + +// Validate validates the gossip configuration and returns any validation errors. +func (cfg *Config) Validate() error { + if cfg == nil { + return nil + } + + errs := cfgerror.New() + + // Validate BindAddr + if cfg.BindAddr == "" { + errs = errs.Append(cfgerror.ErrNotSet, "bind_addr") + } + + // Validate BindPort + if err := validatePort(cfg.BindPort); err != nil { + errs = errs.Append(err, "bind_port") + } + + // Validate AdvertiseAddr + if cfg.AdvertiseAddr == "" { + errs = errs.Append(cfgerror.ErrNotSet, "advertise_addr") + } + + // Validate AdvertisePort + if err := validatePort(cfg.AdvertisePort); err != nil { + errs = errs.Append(err, "advertise_port") + } + + // Validate Seeds + for i, seed := range cfg.Seeds { + if err := validateSeedAddress(seed); err != nil { + errs = errs.Append(err, "seeds", fmt.Sprintf("[%d]", i)) + } + } + + return errs.AsError() +} + +// validatePort validates that a port number is in the valid range (1-65535). +func validatePort(port int) error { + if port < 1 || port > 65535 { + return fmt.Errorf("port must be between 1 and 65535, got %d", port) + } + return nil +} + +// validateSeedAddress validates that a seed address is well-formed (host:port). 
+func validateSeedAddress(seed string) error { + if seed == "" { + return cfgerror.ErrNotSet + } + + host, portStr, err := net.SplitHostPort(seed) + if err != nil { + return fmt.Errorf("invalid seed address format (expected host:port): %w", err) + } + + if host == "" { + return fmt.Errorf("seed address missing host") + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return fmt.Errorf("invalid port in seed address: %w", err) + } + + if err := validatePort(port); err != nil { + return err + } + + return nil +} + +// GetAdvertiseAddress returns the full advertise address in host:port format. +func (cfg *Config) GetAdvertiseAddress() string { + if cfg == nil { + return "" + } + return fmt.Sprintf("%s:%d", cfg.AdvertiseAddr, cfg.AdvertisePort) +} + +// GetBindAddress returns the full bind address in host:port format. +func (cfg *Config) GetBindAddress() string { + if cfg == nil { + return "" + } + return fmt.Sprintf("%s:%d", cfg.BindAddr, cfg.BindPort) +} + +// ParseSeedAddresses returns a list of seed addresses, filtering out empty entries +// and the node's own advertise address to prevent self-connection. 
+func (cfg *Config) ParseSeedAddresses() []string { + if cfg == nil { + return nil + } + + ownAddr := cfg.GetAdvertiseAddress() + seeds := make([]string, 0, len(cfg.Seeds)) + + for _, seed := range cfg.Seeds { + seed = strings.TrimSpace(seed) + if seed == "" { + continue + } + // Don't include our own address as a seed + if seed == ownAddr { + continue + } + seeds = append(seeds, seed) + } + + return seeds +} diff --git a/internal/gitaly/storage/gossip/config_test.go b/internal/gitaly/storage/gossip/config_test.go new file mode 100644 index 00000000000..9d7f55a569f --- /dev/null +++ b/internal/gitaly/storage/gossip/config_test.go @@ -0,0 +1,303 @@ +package gossip + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDefaultConfig(t *testing.T) { + t.Parallel() + + t.Run("with nil user config", func(t *testing.T) { + cfg := DefaultConfig(nil) + + assert.Equal(t, DefaultGossipBindAddr, cfg.BindAddr) + assert.Equal(t, DefaultGossipPort, cfg.BindPort) + assert.Equal(t, DefaultGossipPort, cfg.AdvertisePort) + assert.NotEmpty(t, cfg.AdvertiseAddr, "advertise address should be auto-detected") + assert.Empty(t, cfg.Seeds) + }) + + t.Run("with empty user config", func(t *testing.T) { + cfg := DefaultConfig(&Config{}) + + assert.Equal(t, DefaultGossipBindAddr, cfg.BindAddr) + assert.Equal(t, DefaultGossipPort, cfg.BindPort) + assert.Equal(t, DefaultGossipPort, cfg.AdvertisePort) + assert.NotEmpty(t, cfg.AdvertiseAddr) + assert.Empty(t, cfg.Seeds) + }) + + t.Run("with partial overrides", func(t *testing.T) { + userCfg := &Config{ + BindPort: 8000, + Seeds: []string{"node1:7946", "node2:7946"}, + } + + cfg := DefaultConfig(userCfg) + + assert.Equal(t, DefaultGossipBindAddr, cfg.BindAddr, "should use default bind addr") + assert.Equal(t, 8000, cfg.BindPort, "should use custom bind port") + assert.Equal(t, 8000, cfg.AdvertisePort, "should default advertise port to custom bind port") + 
assert.NotEmpty(t, cfg.AdvertiseAddr) + assert.Len(t, cfg.Seeds, 2) + }) + + t.Run("with all overrides", func(t *testing.T) { + userCfg := &Config{ + BindAddr: "192.168.1.1", + BindPort: 8000, + AdvertiseAddr: "10.0.1.5", + AdvertisePort: 9000, + Seeds: []string{"node1:7946"}, + } + + cfg := DefaultConfig(userCfg) + + assert.Equal(t, "192.168.1.1", cfg.BindAddr) + assert.Equal(t, 8000, cfg.BindPort) + assert.Equal(t, "10.0.1.5", cfg.AdvertiseAddr) + assert.Equal(t, 9000, cfg.AdvertisePort) + assert.Len(t, cfg.Seeds, 1) + }) +} + +func TestDetectAdvertiseAddr(t *testing.T) { + t.Parallel() + + t.Run("with empty listen address", func(t *testing.T) { + addr := detectAdvertiseAddr("") + assert.NotEmpty(t, addr) + // Should be either hostname or 127.0.0.1 + assert.True(t, addr == "127.0.0.1" || !strings.Contains(addr, "0.0.0.0")) + }) + + t.Run("with 0.0.0.0 listen address", func(t *testing.T) { + addr := detectAdvertiseAddr("0.0.0.0") + assert.NotEqual(t, "0.0.0.0", addr, "should not use 0.0.0.0 as advertise address") + }) + + t.Run("with specific IP address", func(t *testing.T) { + addr := detectAdvertiseAddr("192.168.1.100") + assert.Equal(t, "192.168.1.100", addr) + }) + + t.Run("with host:port format", func(t *testing.T) { + addr := detectAdvertiseAddr("192.168.1.100:50051") + assert.Equal(t, "192.168.1.100", addr, "should extract host from host:port") + }) +} + +func TestConfig_Validate(t *testing.T) { + t.Parallel() + + t.Run("valid config", func(t *testing.T) { + cfg := &Config{ + BindAddr: "0.0.0.0", + BindPort: 7946, + AdvertiseAddr: "192.168.1.100", + AdvertisePort: 7946, + Seeds: []string{"node1:7946", "node2:8000"}, + } + + err := cfg.Validate() + assert.NoError(t, err) + }) + + t.Run("nil config", func(t *testing.T) { + var cfg *Config + err := cfg.Validate() + assert.NoError(t, err) + }) + + t.Run("missing bind_addr", func(t *testing.T) { + cfg := &Config{ + BindAddr: "", + BindPort: 7946, + AdvertiseAddr: "192.168.1.100", + AdvertisePort: 7946, + } + + 
err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "bind_addr") + }) + + t.Run("missing advertise_addr", func(t *testing.T) { + cfg := &Config{ + BindAddr: "0.0.0.0", + BindPort: 7946, + AdvertiseAddr: "", + AdvertisePort: 7946, + } + + err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "advertise_addr") + }) + + t.Run("invalid bind_port (too low)", func(t *testing.T) { + cfg := &Config{ + BindAddr: "0.0.0.0", + BindPort: 0, + AdvertiseAddr: "192.168.1.100", + AdvertisePort: 7946, + } + + err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "bind_port") + }) + + t.Run("invalid bind_port (too high)", func(t *testing.T) { + cfg := &Config{ + BindAddr: "0.0.0.0", + BindPort: 70000, + AdvertiseAddr: "192.168.1.100", + AdvertisePort: 7946, + } + + err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "bind_port") + }) + + t.Run("invalid advertise_port", func(t *testing.T) { + cfg := &Config{ + BindAddr: "0.0.0.0", + BindPort: 7946, + AdvertiseAddr: "192.168.1.100", + AdvertisePort: -1, + } + + err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "advertise_port") + }) + + t.Run("invalid seed format (missing port)", func(t *testing.T) { + cfg := &Config{ + BindAddr: "0.0.0.0", + BindPort: 7946, + AdvertiseAddr: "192.168.1.100", + AdvertisePort: 7946, + Seeds: []string{"node1"}, + } + + err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "seeds") + }) + + t.Run("invalid seed format (invalid port)", func(t *testing.T) { + cfg := &Config{ + BindAddr: "0.0.0.0", + BindPort: 7946, + AdvertiseAddr: "192.168.1.100", + AdvertisePort: 7946, + Seeds: []string{"node1:abc"}, + } + + err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "seeds") + }) + + t.Run("invalid seed port (out of range)", func(t *testing.T) { + cfg := &Config{ + BindAddr: "0.0.0.0", + BindPort: 7946, + AdvertiseAddr: 
"192.168.1.100", + AdvertisePort: 7946, + Seeds: []string{"node1:99999"}, + } + + err := cfg.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "seeds") + }) +} + +func TestConfig_GetAdvertiseAddress(t *testing.T) { + t.Parallel() + + t.Run("returns formatted address", func(t *testing.T) { + cfg := &Config{ + AdvertiseAddr: "192.168.1.100", + AdvertisePort: 7946, + } + + assert.Equal(t, "192.168.1.100:7946", cfg.GetAdvertiseAddress()) + }) + + t.Run("with nil config", func(t *testing.T) { + var cfg *Config + assert.Empty(t, cfg.GetAdvertiseAddress()) + }) +} + +func TestConfig_GetBindAddress(t *testing.T) { + t.Parallel() + + t.Run("returns formatted address", func(t *testing.T) { + cfg := &Config{ + BindAddr: "0.0.0.0", + BindPort: 7946, + } + + assert.Equal(t, "0.0.0.0:7946", cfg.GetBindAddress()) + }) + + t.Run("with nil config", func(t *testing.T) { + var cfg *Config + assert.Empty(t, cfg.GetBindAddress()) + }) +} + +func TestConfig_ParseSeedAddresses(t *testing.T) { + t.Parallel() + + t.Run("filters empty entries", func(t *testing.T) { + cfg := &Config{ + AdvertiseAddr: "192.168.1.100", + AdvertisePort: 7946, + Seeds: []string{"node1:7946", "", " ", "node2:7946"}, + } + + seeds := cfg.ParseSeedAddresses() + assert.Len(t, seeds, 2) + assert.Contains(t, seeds, "node1:7946") + assert.Contains(t, seeds, "node2:7946") + }) + + t.Run("filters own address", func(t *testing.T) { + cfg := &Config{ + AdvertiseAddr: "192.168.1.100", + AdvertisePort: 7946, + Seeds: []string{"node1:7946", "192.168.1.100:7946", "node2:7946"}, + } + + seeds := cfg.ParseSeedAddresses() + assert.Len(t, seeds, 2) + assert.NotContains(t, seeds, "192.168.1.100:7946", "should not include own address") + }) + + t.Run("with nil config", func(t *testing.T) { + var cfg *Config + assert.Nil(t, cfg.ParseSeedAddresses()) + }) + + t.Run("with no seeds", func(t *testing.T) { + cfg := &Config{ + AdvertiseAddr: "192.168.1.100", + AdvertisePort: 7946, + Seeds: []string{}, + } + + seeds := 
cfg.ParseSeedAddresses() + assert.Empty(t, seeds) + }) +} diff --git a/internal/gitaly/storage/gossip/testhelper_test.go b/internal/gitaly/storage/gossip/testhelper_test.go new file mode 100644 index 00000000000..9174da84a71 --- /dev/null +++ b/internal/gitaly/storage/gossip/testhelper_test.go @@ -0,0 +1,11 @@ +package gossip + +import ( + "testing" + + "gitlab.com/gitlab-org/gitaly/v18/internal/testhelper" +) + +func TestMain(m *testing.M) { + testhelper.Run(m) +} diff --git a/internal/gitaly/storage/gossip/types.go b/internal/gitaly/storage/gossip/types.go new file mode 100644 index 00000000000..6267fe9c618 --- /dev/null +++ b/internal/gitaly/storage/gossip/types.go @@ -0,0 +1,37 @@ +package gossip + +// Config holds the configuration for the gossip protocol used to propagate +// routing table updates across the Gitaly cluster. +type Config struct { + // BindAddr is the address to bind the gossip listener to. + // Default: "0.0.0.0" (all interfaces) + BindAddr string `json:"bind_addr" toml:"bind_addr,omitempty"` + + // BindPort is the port to bind the gossip listener to. + // Default: 7946 (standard memberlist port) + BindPort int `json:"bind_port" toml:"bind_port,omitempty"` + + // AdvertiseAddr is the address to advertise to other nodes in the cluster. + // This is the address other nodes will use to contact this node. + // Default: Auto-detected from hostname or network interfaces + AdvertiseAddr string `json:"advertise_addr" toml:"advertise_addr,omitempty"` + + // AdvertisePort is the port to advertise to other nodes in the cluster. + // Default: Same as BindPort + AdvertisePort int `json:"advertise_port" toml:"advertise_port,omitempty"` + + // Seeds is a list of seed nodes to bootstrap the gossip cluster. 
+ // Format: ["host1:port1", "host2:port2"] + // Default: Empty (must be manually configured for multi-node clusters) + Seeds []string `json:"seeds" toml:"seeds,omitempty"` +} + +const ( + // DefaultGossipBindAddr is the default bind address for the gossip protocol. + // Binds to all available network interfaces. + DefaultGossipBindAddr = "0.0.0.0" + + // DefaultGossipPort is the default port for the gossip protocol. + // This is the standard port used by the memberlist library. + DefaultGossipPort = 7946 +) -- GitLab From 1e49c1bd0be0ef43c7d9ccce8a4bee9f999bf36a Mon Sep 17 00:00:00 2001 From: Quang-Minh Nguyen Date: Wed, 22 Oct 2025 08:37:12 +0700 Subject: [PATCH 2/5] gossip: Add memberlist manager for cluster membership Gitaly needs distributed cluster membership discovery for routing table propagation without a central coordinator. The previous commit established gossip configuration. This commit implements the core membership management layer using HashiCorp's memberlist library. The memberlist manager wraps the memberlist library with Gitaly-specific lifecycle management, event handling, and observability. It coordinates node discovery through seed-based bootstrapping, handles graceful degradation when gossip fails, and provides thread-safe access to cluster state. The implementation uses context-aware goroutine cleanup to prevent resource leaks and avoids deadlocks by decoupling event callbacks from synchronous state queries. Key implementation details: Event callbacks run within memberlist's internal locks, so calling back into the manager (e.g., NumMembers()) from callbacks causes deadlocks. The solution moves cluster size updates from synchronous event callbacks to periodic background updates every 30 seconds. This prevents lock contention while maintaining metric accuracy. Graceful degradation allows nodes to operate standalone when gossip initialization fails or no seeds are reachable. 
The manager logs warnings but continues functioning, enabling single-node deployments and resilience during cluster formation. --- go.mod | 12 + go.sum | 95 ++++ internal/gitaly/storage/gossip/events.go | 102 +++++ internal/gitaly/storage/gossip/events_test.go | 231 ++++++++++ internal/gitaly/storage/gossip/memberlist.go | 362 +++++++++++++++ .../gitaly/storage/gossip/memberlist_test.go | 422 ++++++++++++++++++ .../gitaly/storage/gossip/testhelper_test.go | 1 + 7 files changed, 1225 insertions(+) create mode 100644 internal/gitaly/storage/gossip/events.go create mode 100644 internal/gitaly/storage/gossip/events_test.go create mode 100644 internal/gitaly/storage/gossip/memberlist.go create mode 100644 internal/gitaly/storage/gossip/memberlist_test.go diff --git a/go.mod b/go.mod index 077f89c6101..f1a992c61a3 100644 --- a/go.mod +++ b/go.mod @@ -62,6 +62,8 @@ require ( google.golang.org/protobuf v1.36.10 ) +require github.com/hashicorp/memberlist v0.5.3 + require ( cel.dev/expr v0.23.1 // indirect cloud.google.com/go v0.115.1 // indirect @@ -88,6 +90,7 @@ require ( github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 // indirect + github.com/armon/go-metrics v0.4.1 // indirect github.com/avast/retry-go v3.0.0+incompatible // indirect github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/aws/aws-sdk-go-v2 v1.31.0 // indirect @@ -144,13 +147,21 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.0.0 // indirect github.com/google/flatbuffers v25.2.10+incompatible // indirect github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/google/wire v0.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // 
indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-sockaddr v1.0.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/hhatto/gorst v0.0.0-20181029133204-ca9f730cac5b // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect @@ -193,6 +204,7 @@ require ( github.com/rivo/uniseg v0.2.0 // indirect github.com/rubyist/tracerx v0.0.0-20170927163412-787959303086 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/shirou/gopsutil/v3 v3.21.12 // indirect diff --git a/go.sum b/go.sum index d30c6249168..9b679c78f5c 100644 --- a/go.sum +++ b/go.sum @@ -93,6 +93,7 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v4.4.0+incompatible h1:R7WqXWP4fIOAqWJtUKmSfuc7eDsBT58k9AY5WSHVosk= github.com/DataDog/datadog-go v4.4.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= 
github.com/DataDog/gostackparse v0.5.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= @@ -115,11 +116,18 @@ github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0k github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 
v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= @@ -165,12 +173,15 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzx github.com/aws/smithy-go v1.21.0 h1:H7L8dtDRk0P1Qm6y0ji7MCYMQObJ5R9CRpyPhRUkLYA= github.com/aws/smithy-go v1.21.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -178,6 +189,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= 
github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= @@ -289,7 +302,11 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -307,6 +324,7 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= @@ -349,6 +367,7 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= @@ -370,6 +389,7 @@ github.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV github.com/google/go-replayers/grpcreplay v1.3.0/go.mod h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI= github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk= github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= @@ -423,13 +443,34 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2/go.mod 
h1:wd1YpapPLivG6nQ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= +github.com/hashicorp/go-msgpack/v2 v2.1.1 h1:xQEY9yB2wnHitoSzk/B9UjXWRQ67QKu5AOm8aFp8N3I= +github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk= +github.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE= github.com/hashicorp/yamux v0.1.2-0.20220728231024-8f49b6f63f18 h1:IVujPV6DRIu1fYF4zUHrfhkngJzmYjelXa+iSUiFZSI= github.com/hashicorp/yamux v0.1.2-0.20220728231024-8f49b6f63f18/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hhatto/gorst v0.0.0-20181029133204-ca9f730cac5b h1:Jdu2tbAxkRouSILp2EbposIb8h4gO+2QuZEn3d9sKAc= @@ -470,10 +511,17 @@ github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM= github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE= +github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= @@ -484,6 +532,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -512,6 +562,7 @@ github.com/mattn/go-runewidth v0.0.16 
h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= @@ -520,11 +571,17 @@ github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.6.3/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/neurosnap/sentences v1.0.6 h1:iBVUivNtlwGkYsJblWV8GGVFmXzZzak907Ci8aA0VTE= github.com/neurosnap/sentences v1.0.6/go.mod h1:pg1IapvYpWCJJm/Etxeh0+gtMf1rI1STY9S7eUCPbDc= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -551,6 +608,8 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= @@ -562,6 +621,7 @@ github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -576,13 +636,30 @@ github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod 
h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.67.1 h1:OTSON1P4DNxzTg4hmKCc37o4ZAZDv0cfXLkOt0oEowI= github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8NjZAmr8vb0Q= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/prometheus/prometheus v0.54.0 h1:6+VmEkohHcofl3W5LyRlhw1Lfm575w/aX6ZFyVAmzM0= @@ -601,6 +678,8 @@ github.com/rubyist/tracerx v0.0.0-20170927163412-787959303086 h1:mncRSDOqYCng7jO github.com/rubyist/tracerx v0.0.0-20170927163412-787959303086/go.mod h1:YpdgDXpumPB/+EGmGTYHeiW/0QVFRzBYTNFaxWfPDk4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a h1:iLcLb5Fwwz7g/DLK89F+uQBDeAhHhwdzB5fSlVdhGcM= github.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a/go.mod 
h1:wozgYq9WEBQBaIJe4YZ0qTSFAMxmcwBhQH0fO0R34Z0= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= @@ -611,7 +690,9 @@ github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8 github.com/shogo82148/go-shuffle v0.0.0-20180218125048-27e6095f230d/go.mod h1:2htx6lmL0NGLHlO8ZCf+lQBGBHIbEujyywxJArf+2Yc= github.com/shogo82148/go-shuffle v1.0.1 h1:4swIpHXLMAz14DE4YTgakgadpRN0n1wE1dieGnOTVFU= github.com/shogo82148/go-shuffle v1.0.1/go.mod h1:HQPjVgUUZ9TNgm4/K/iXRuAdhPsQrXnAGgtk/9kqbBY= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -645,6 +726,7 @@ github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ= github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= @@ -715,6 +797,7 @@ go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 
v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= gocloud.dev v0.40.1-0.20241107185025-56954848c3aa h1:2p0wdFRGiA+WwKE0lmKwPF0rI2clAkmPeJQD5rsfEPQ= gocloud.dev v0.40.1-0.20241107185025-56954848c3aa/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -780,6 +863,7 @@ golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -787,6 +871,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net 
v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -859,7 +944,9 @@ golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -874,6 +961,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -886,6 +974,8 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -904,6 +994,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1174,6 +1265,7 @@ google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aO google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/DataDog/dd-trace-go.v1 v1.32.0 h1:DkD0plWEVUB8v/Ru6kRBW30Hy/fRNBC8hPdcExuBZMc= gopkg.in/DataDog/dd-trace-go.v1 v1.32.0/go.mod h1:wRKMf/tRASHwH/UOfPQ3IQmVFhTz2/1a1/mpXoIjF54= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1193,7 +1285,10 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/gitaly/storage/gossip/events.go b/internal/gitaly/storage/gossip/events.go new file mode 100644 index 
00000000000..2dcf85def71 --- /dev/null +++ b/internal/gitaly/storage/gossip/events.go @@ -0,0 +1,102 @@ +package gossip + +import ( + "sync" + + "github.com/hashicorp/memberlist" + "gitlab.com/gitlab-org/gitaly/v18/internal/log" +) + +// EventDelegate handles memberlist events for node join, leave, and updates. +// It implements the memberlist.EventDelegate interface and provides callbacks +// for cluster membership changes. +type EventDelegate struct { + logger log.Logger + + // Callbacks for handling membership events + onNodeJoin func(node *memberlist.Node) + onNodeLeave func(node *memberlist.Node) + onNodeUpdate func(node *memberlist.Node) + + // Mutex to protect callback registration + mu sync.RWMutex +} + +// NewEventDelegate creates a new EventDelegate with the given logger. +func NewEventDelegate(logger log.Logger) *EventDelegate { + return &EventDelegate{ + logger: logger.WithField("component", "gossip.event_delegate"), + } +} + +// NotifyJoin is called when a node joins the cluster. +// This is invoked on all nodes whenever a new node joins the cluster. +func (d *EventDelegate) NotifyJoin(node *memberlist.Node) { + d.logger.WithFields(log.Fields{ + "node_name": node.Name, + "node_addr": node.Address(), + }).Info("node joined cluster") + + d.mu.RLock() + callback := d.onNodeJoin + d.mu.RUnlock() + + if callback != nil { + callback(node) + } +} + +// NotifyLeave is called when a node leaves the cluster. +// This is invoked on all nodes whenever a node leaves the cluster (gracefully or due to failure). +func (d *EventDelegate) NotifyLeave(node *memberlist.Node) { + d.logger.WithFields(log.Fields{ + "node_name": node.Name, + "node_addr": node.Address(), + }).Warn("node left cluster") + + d.mu.RLock() + callback := d.onNodeLeave + d.mu.RUnlock() + + if callback != nil { + callback(node) + } +} + +// NotifyUpdate is called when a node's metadata is updated. +// This is invoked when the local metadata or node metadata changes. 
+func (d *EventDelegate) NotifyUpdate(node *memberlist.Node) { + d.logger.WithFields(log.Fields{ + "node_name": node.Name, + "node_addr": node.Address(), + }).Debug("node metadata updated") + + d.mu.RLock() + callback := d.onNodeUpdate + d.mu.RUnlock() + + if callback != nil { + callback(node) + } +} + +// SetOnNodeJoin registers a callback that will be invoked when a node joins the cluster. +func (d *EventDelegate) SetOnNodeJoin(callback func(node *memberlist.Node)) { + d.mu.Lock() + defer d.mu.Unlock() + d.onNodeJoin = callback +} + +// SetOnNodeLeave registers a callback that will be invoked when a node leaves the cluster. +func (d *EventDelegate) SetOnNodeLeave(callback func(node *memberlist.Node)) { + d.mu.Lock() + defer d.mu.Unlock() + d.onNodeLeave = callback +} + +// SetOnNodeUpdate registers a callback that will be invoked when a node's metadata is updated. +func (d *EventDelegate) SetOnNodeUpdate(callback func(node *memberlist.Node)) { + d.mu.Lock() + defer d.mu.Unlock() + d.onNodeUpdate = callback +} diff --git a/internal/gitaly/storage/gossip/events_test.go b/internal/gitaly/storage/gossip/events_test.go new file mode 100644 index 00000000000..20a2b119031 --- /dev/null +++ b/internal/gitaly/storage/gossip/events_test.go @@ -0,0 +1,231 @@ +package gossip + +import ( + "net" + "sync" + "testing" + + "github.com/hashicorp/memberlist" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gitlab.com/gitlab-org/gitaly/v18/internal/testhelper" +) + +func TestEventDelegate_NotifyJoin(t *testing.T) { + t.Parallel() + + logger := testhelper.NewLogger(t) + delegate := NewEventDelegate(logger) + + // Create a test node + node := &memberlist.Node{ + Name: "test-node", + Addr: net.ParseIP("192.168.1.1"), + Port: 7946, + } + + // Test without callback + require.NotPanics(t, func() { + delegate.NotifyJoin(node) + }) + + // Test with callback + var called bool + var calledNode *memberlist.Node + var mu sync.Mutex + + 
delegate.SetOnNodeJoin(func(n *memberlist.Node) { + mu.Lock() + defer mu.Unlock() + called = true + calledNode = n + }) + + delegate.NotifyJoin(node) + + mu.Lock() + defer mu.Unlock() + assert.True(t, called, "callback should have been invoked") + assert.Equal(t, node, calledNode, "callback should receive the correct node") +} + +func TestEventDelegate_NotifyLeave(t *testing.T) { + t.Parallel() + + logger := testhelper.NewLogger(t) + delegate := NewEventDelegate(logger) + + node := &memberlist.Node{ + Name: "leaving-node", + Addr: net.ParseIP("192.168.1.2"), + Port: 7946, + } + + // Test without callback + require.NotPanics(t, func() { + delegate.NotifyLeave(node) + }) + + // Test with callback + var called bool + var calledNode *memberlist.Node + var mu sync.Mutex + + delegate.SetOnNodeLeave(func(n *memberlist.Node) { + mu.Lock() + defer mu.Unlock() + called = true + calledNode = n + }) + + delegate.NotifyLeave(node) + + mu.Lock() + defer mu.Unlock() + assert.True(t, called, "callback should have been invoked") + assert.Equal(t, node, calledNode, "callback should receive the correct node") +} + +func TestEventDelegate_NotifyUpdate(t *testing.T) { + t.Parallel() + + logger := testhelper.NewLogger(t) + delegate := NewEventDelegate(logger) + + node := &memberlist.Node{ + Name: "updated-node", + Addr: net.ParseIP("192.168.1.3"), + Port: 7946, + Meta: []byte("new-metadata"), + } + + // Test without callback + require.NotPanics(t, func() { + delegate.NotifyUpdate(node) + }) + + // Test with callback + var called bool + var calledNode *memberlist.Node + var mu sync.Mutex + + delegate.SetOnNodeUpdate(func(n *memberlist.Node) { + mu.Lock() + defer mu.Unlock() + called = true + calledNode = n + }) + + delegate.NotifyUpdate(node) + + mu.Lock() + defer mu.Unlock() + assert.True(t, called, "callback should have been invoked") + assert.Equal(t, node, calledNode, "callback should receive the correct node") +} + +func TestEventDelegate_ConcurrentCallbacks(t *testing.T) { + 
t.Parallel() + + logger := testhelper.NewLogger(t) + delegate := NewEventDelegate(logger) + + node := &memberlist.Node{ + Name: "concurrent-node", + Addr: net.ParseIP("192.168.1.4"), + Port: 7946, + } + + // Test concurrent callback registration and invocation + var wg sync.WaitGroup + iterations := 100 + + wg.Add(iterations * 3) // 3 types of events + + // Concurrently set and call join callbacks + for i := 0; i < iterations; i++ { + go func() { + defer wg.Done() + delegate.SetOnNodeJoin(func(n *memberlist.Node) {}) + delegate.NotifyJoin(node) + }() + } + + // Concurrently set and call leave callbacks + for i := 0; i < iterations; i++ { + go func() { + defer wg.Done() + delegate.SetOnNodeLeave(func(n *memberlist.Node) {}) + delegate.NotifyLeave(node) + }() + } + + // Concurrently set and call update callbacks + for i := 0; i < iterations; i++ { + go func() { + defer wg.Done() + delegate.SetOnNodeUpdate(func(n *memberlist.Node) {}) + delegate.NotifyUpdate(node) + }() + } + + wg.Wait() +} + +func TestEventDelegate_CallbackReplacement(t *testing.T) { + t.Parallel() + + logger := testhelper.NewLogger(t) + delegate := NewEventDelegate(logger) + + node := &memberlist.Node{ + Name: "test-node", + Addr: net.ParseIP("192.168.1.5"), + Port: 7946, + } + + // Set initial callback + firstCalled := false + delegate.SetOnNodeJoin(func(n *memberlist.Node) { + firstCalled = true + }) + + // Replace with second callback + secondCalled := false + delegate.SetOnNodeJoin(func(n *memberlist.Node) { + secondCalled = true + }) + + // Only the second callback should be called + delegate.NotifyJoin(node) + + assert.False(t, firstCalled, "first callback should not be called after replacement") + assert.True(t, secondCalled, "second callback should be called") +} + +func TestEventDelegate_NilCallbacks(t *testing.T) { + t.Parallel() + + logger := testhelper.NewLogger(t) + delegate := NewEventDelegate(logger) + + node := &memberlist.Node{ + Name: "test-node", + Addr: 
net.ParseIP("192.168.1.6"), + Port: 7946, + } + + // Set callback then clear it + called := false + delegate.SetOnNodeJoin(func(n *memberlist.Node) { + called = true + }) + delegate.SetOnNodeJoin(nil) + + // Should not panic with nil callback + require.NotPanics(t, func() { + delegate.NotifyJoin(node) + }) + + assert.False(t, called, "cleared callback should not be invoked") +} diff --git a/internal/gitaly/storage/gossip/memberlist.go b/internal/gitaly/storage/gossip/memberlist.go new file mode 100644 index 00000000000..791fd531f6f --- /dev/null +++ b/internal/gitaly/storage/gossip/memberlist.go @@ -0,0 +1,362 @@ +package gossip + +import ( + "context" + "fmt" + stdlog "log" + "sync" + "time" + + "github.com/hashicorp/memberlist" + "github.com/prometheus/client_golang/prometheus" + "gitlab.com/gitlab-org/gitaly/v18/internal/log" +) + +// MemberlistManager manages the lifecycle of a memberlist instance for gossip protocol. +// It handles initialization, node joining, graceful shutdown, and provides event callbacks +// for cluster membership changes. +type MemberlistManager struct { + config *Config + logger log.Logger + memberlist *memberlist.Memberlist + eventDelegate *EventDelegate + + // Lifecycle management + mu sync.RWMutex + started bool + shutdown bool + cancel context.CancelFunc + + // Metrics + metrics *memberlistMetrics +} + +// memberlistMetrics holds Prometheus metrics for memberlist health. +type memberlistMetrics struct { + clusterSize prometheus.Gauge + nodeJoinTotal prometheus.Counter + nodeLeaveTotal prometheus.Counter + healthScore prometheus.Gauge +} + +// MemberlistManagerOption is a functional option for configuring MemberlistManager. +type MemberlistManagerOption func(*MemberlistManager) + +// WithMetrics enables Prometheus metrics for the MemberlistManager. 
+func WithMetrics(metrics *memberlistMetrics) MemberlistManagerOption { + return func(m *MemberlistManager) { + m.metrics = metrics + } +} + +// NewMemberlistManager creates a new MemberlistManager with the given configuration. +// The manager is not started until Start() is called. +func NewMemberlistManager(cfg *Config, logger log.Logger, opts ...MemberlistManagerOption) (*MemberlistManager, error) { + if cfg == nil { + return nil, fmt.Errorf("gossip config cannot be nil") + } + + // Validate the configuration + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("invalid gossip config: %w", err) + } + + manager := &MemberlistManager{ + config: cfg, + logger: logger.WithField("component", "gossip.memberlist_manager"), + eventDelegate: NewEventDelegate(logger), + } + + // Apply options + for _, opt := range opts { + opt(manager) + } + + return manager, nil +} + +// Start initializes the memberlist and attempts to join the cluster using configured seeds. +// This method is idempotent and will return an error if called multiple times. 
func (m *MemberlistManager) Start(ctx context.Context) error {
	// NOTE(review): m.mu is held for this entire call, including
	// memberlist.Create and joinCluster (which can block for up to ~10
	// seconds), so reader methods such as NumMembers and IsAlive block in the
	// meantime — confirm this is acceptable for callers.
	m.mu.Lock()
	defer m.mu.Unlock()

	// The manager is a one-way state machine: once shut down it can never be
	// started again; callers must construct a new manager.
	if m.shutdown {
		return fmt.Errorf("memberlist manager has been shut down")
	}

	if m.started {
		return fmt.Errorf("memberlist manager already started")
	}

	// Create memberlist configuration. DefaultLANConfig is memberlist's
	// preset for local networks; the node name is taken from the advertise
	// address.
	mlConfig := memberlist.DefaultLANConfig()
	mlConfig.Name = m.config.GetAdvertiseAddress()
	mlConfig.BindAddr = m.config.BindAddr
	mlConfig.BindPort = m.config.BindPort
	mlConfig.AdvertiseAddr = m.config.AdvertiseAddr
	mlConfig.AdvertisePort = m.config.AdvertisePort
	mlConfig.Events = m.eventDelegate
	mlConfig.Logger = newMemberlistLogger(m.logger)

	m.logger.WithFields(log.Fields{
		"bind_address":      m.config.GetBindAddress(),
		"advertise_address": m.config.GetAdvertiseAddress(),
		"seeds":             m.config.Seeds,
	}).Info("starting memberlist manager")

	// Create the memberlist; this is where the configured listeners are bound.
	ml, err := memberlist.Create(mlConfig)
	if err != nil {
		return fmt.Errorf("failed to create memberlist: %w", err)
	}

	m.memberlist = ml
	m.started = true

	// Set up event callbacks for metrics before joining, so membership events
	// triggered by the initial join are counted.
	if m.metrics != nil {
		m.setupMetricsCallbacks()
		// Initialize cluster size metric with the local node (1 node initially)
		if m.metrics.clusterSize != nil {
			m.metrics.clusterSize.Set(1)
		}
	}

	// Attempt to join cluster using seeds
	if err := m.joinCluster(ctx); err != nil {
		// Log warning but don't fail - node can operate standalone
		m.logger.WithError(err).Warn("failed to join cluster, operating as standalone node")
	}

	// Start periodic health updates with a cancellable context. The context
	// is deliberately derived from context.Background rather than ctx so the
	// goroutine's lifetime is bound to Shutdown, not to the Start caller's
	// context.
	healthCtx, cancel := context.WithCancel(context.Background())
	m.cancel = cancel
	go m.periodicHealthUpdate(healthCtx)

	return nil
}

// joinCluster attempts to join the cluster using the configured seed nodes.
// If no seeds are configured or all seeds fail, the node will operate standalone.
+func (m *MemberlistManager) joinCluster(ctx context.Context) error { + seeds := m.config.ParseSeedAddresses() + + if len(seeds) == 0 { + m.logger.Info("no seed nodes configured, operating as first node in cluster") + return nil + } + + m.logger.WithField("seeds", seeds).Info("attempting to join cluster") + + // Try to join with timeout + joinCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + // Create a channel to signal completion + done := make(chan error, 1) + + go func() { + numJoined, err := m.memberlist.Join(seeds) + if err != nil { + done <- fmt.Errorf("failed to join any seed nodes: %w", err) + return + } + + if numJoined == 0 { + done <- fmt.Errorf("successfully contacted seeds but joined 0 nodes") + return + } + + m.logger.WithField("nodes_joined", numJoined).Info("successfully joined cluster") + done <- nil + }() + + select { + case <-joinCtx.Done(): + return fmt.Errorf("timeout joining cluster: %w", joinCtx.Err()) + case err := <-done: + return err + } +} + +// Shutdown gracefully shuts down the memberlist manager. +// It notifies other nodes of the departure and cleans up resources. +// This method is idempotent and safe to call multiple times. 
func (m *MemberlistManager) Shutdown(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.shutdown {
		return nil // Already shut down
	}

	if !m.started {
		// Never started: nothing to tear down, just latch the terminal state.
		m.shutdown = true
		return nil // Never started
	}

	m.logger.Info("shutting down memberlist manager")

	// Cancel background goroutines (the periodic health updater started in Start)
	if m.cancel != nil {
		m.cancel()
	}

	// Create timeout for graceful shutdown
	shutdownCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	// Channel to signal leave completion; buffered so the goroutine can
	// finish and exit even if we stop waiting after the timeout fires.
	done := make(chan error, 1)

	go func() {
		// First leave the cluster gracefully, broadcasting the departure to
		// peers with a 500ms broadcast timeout.
		if err := m.memberlist.Leave(500 * time.Millisecond); err != nil {
			m.logger.WithError(err).Warn("error during graceful leave")
		}

		// Then shutdown the memberlist
		if err := m.memberlist.Shutdown(); err != nil {
			done <- fmt.Errorf("error during shutdown: %w", err)
			return
		}

		done <- nil
	}()

	select {
	case <-shutdownCtx.Done():
		// Timeout - force shutdown
		m.logger.Warn("graceful shutdown timeout, forcing shutdown")
		// NOTE(review): if the forced shutdown errors, we return without
		// setting m.shutdown, so a later Shutdown call would retry the whole
		// sequence — confirm this retry behavior is intended.
		if err := m.memberlist.Shutdown(); err != nil {
			return fmt.Errorf("error during forced shutdown: %w", err)
		}
	case err := <-done:
		// Shutdown errors are logged but not returned; the manager is still
		// considered shut down below.
		if err != nil {
			m.logger.WithError(err).Warn("error during shutdown")
		}
	}

	m.shutdown = true
	m.logger.Info("memberlist manager shut down successfully")

	return nil
}

// GetMembers returns the current list of members in the cluster, or nil when
// the memberlist has not been created yet.
func (m *MemberlistManager) GetMembers() []*memberlist.Node {
	m.mu.RLock()
	defer m.mu.RUnlock()

	if m.memberlist == nil {
		return nil
	}

	return m.memberlist.Members()
}

// GetLocalNode returns the local node information, or nil before Start.
func (m *MemberlistManager) GetLocalNode() *memberlist.Node {
	m.mu.RLock()
	defer m.mu.RUnlock()

	if m.memberlist == nil {
		return nil
	}

	return m.memberlist.LocalNode()
}

// NumMembers returns the number of members in the cluster.
+func (m *MemberlistManager) NumMembers() int { + m.mu.RLock() + defer m.mu.RUnlock() + + if m.memberlist == nil { + return 0 + } + + return m.memberlist.NumMembers() +} + +// GetHealthScore returns the health score of the local node. +// Lower scores indicate better health. A score of 0 is perfect health. +func (m *MemberlistManager) GetHealthScore() int { + m.mu.RLock() + defer m.mu.RUnlock() + + if m.memberlist == nil { + return -1 + } + + return m.memberlist.GetHealthScore() +} + +// IsAlive returns true if the memberlist is started and not shut down. +func (m *MemberlistManager) IsAlive() bool { + m.mu.RLock() + defer m.mu.RUnlock() + + return m.started && !m.shutdown +} + +// EventDelegate returns the event delegate for registering callbacks. +func (m *MemberlistManager) EventDelegate() *EventDelegate { + return m.eventDelegate +} + +// setupMetricsCallbacks registers callbacks to update Prometheus metrics on membership events. +func (m *MemberlistManager) setupMetricsCallbacks() { + m.eventDelegate.SetOnNodeJoin(func(node *memberlist.Node) { + if m.metrics.nodeJoinTotal != nil { + m.metrics.nodeJoinTotal.Inc() + } + // Note: cluster size is updated periodically in periodicHealthUpdate + // rather than synchronously here to avoid deadlocks when callbacks + // are invoked from within memberlist operations that hold locks. + }) + + m.eventDelegate.SetOnNodeLeave(func(node *memberlist.Node) { + if m.metrics.nodeLeaveTotal != nil { + m.metrics.nodeLeaveTotal.Inc() + } + // Note: cluster size is updated periodically in periodicHealthUpdate + // rather than synchronously here to avoid deadlocks when callbacks + // are invoked from within memberlist operations that hold locks. + }) +} + +// periodicHealthUpdate periodically updates health metrics. 
+func (m *MemberlistManager) periodicHealthUpdate(ctx context.Context) { + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if m.metrics != nil && m.metrics.healthScore != nil { + score := m.GetHealthScore() + if score >= 0 { + m.metrics.healthScore.Set(float64(score)) + } + } + + if m.metrics != nil && m.metrics.clusterSize != nil { + m.metrics.clusterSize.Set(float64(m.NumMembers())) + } + } + } +} + +// newMemberlistLogger adapts Gitaly's logger to memberlist's standard logger. +func newMemberlistLogger(logger log.Logger) *stdlog.Logger { + // Get the underlying logrus entry to access the writer + if logrusLogger, ok := logger.(log.LogrusLogger); ok { + return stdlog.New(logrusLogger.Output(), "[memberlist] ", 0) + } + // Fallback to stderr if we can't get the output writer + return stdlog.New(stdlog.Writer(), "[memberlist] ", 0) +} diff --git a/internal/gitaly/storage/gossip/memberlist_test.go b/internal/gitaly/storage/gossip/memberlist_test.go new file mode 100644 index 00000000000..89cdef14fcf --- /dev/null +++ b/internal/gitaly/storage/gossip/memberlist_test.go @@ -0,0 +1,422 @@ +package gossip + +import ( + "context" + "fmt" + "net" + "testing" + "time" + + "github.com/hashicorp/memberlist" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gitlab.com/gitlab-org/gitaly/v18/internal/testhelper" +) + +func TestNewMemberlistManager(t *testing.T) { + t.Parallel() + + logger := testhelper.NewLogger(t) + + t.Run("with valid config", func(t *testing.T) { + cfg := DefaultConfig(&Config{ + BindPort: 17946, // Use non-standard port for testing + }) + + manager, err := NewMemberlistManager(cfg, logger) + require.NoError(t, err) + require.NotNil(t, manager) + assert.Equal(t, cfg, manager.config) + assert.NotNil(t, manager.eventDelegate) + 
assert.False(t, manager.IsAlive()) + }) + + t.Run("with nil config", func(t *testing.T) { + manager, err := NewMemberlistManager(nil, logger) + require.Error(t, err) + require.Nil(t, manager) + assert.Contains(t, err.Error(), "gossip config cannot be nil") + }) + + t.Run("with invalid config", func(t *testing.T) { + cfg := &Config{ + BindAddr: "", + BindPort: 0, + AdvertiseAddr: "", + AdvertisePort: 0, + } + + manager, err := NewMemberlistManager(cfg, logger) + require.Error(t, err) + require.Nil(t, manager) + assert.Contains(t, err.Error(), "invalid gossip config") + }) +} + +func TestMemberlistManager_StartAndShutdown(t *testing.T) { + t.Parallel() + + logger := testhelper.NewLogger(t) + ctx := context.Background() + + t.Run("successful start and shutdown", func(t *testing.T) { + port := findAvailablePort(t) + cfg := DefaultConfig(&Config{ + BindPort: port, + }) + + manager, err := NewMemberlistManager(cfg, logger) + require.NoError(t, err) + + // Start the manager + err = manager.Start(ctx) + require.NoError(t, err) + assert.True(t, manager.IsAlive()) + assert.NotNil(t, manager.GetLocalNode()) + + // Verify we can get members + members := manager.GetMembers() + assert.NotNil(t, members) + assert.GreaterOrEqual(t, len(members), 1, "should have at least the local node") + + // Verify initial cluster size is 1 (only local node) + assert.Equal(t, 1, manager.NumMembers()) + + // Verify health score + healthScore := manager.GetHealthScore() + assert.GreaterOrEqual(t, healthScore, 0, "health score should be non-negative") + + // Shutdown the manager + err = manager.Shutdown(ctx) + require.NoError(t, err) + assert.False(t, manager.IsAlive()) + }) + + t.Run("start already started manager", func(t *testing.T) { + port := findAvailablePort(t) + cfg := DefaultConfig(&Config{ + BindPort: port, + }) + + manager, err := NewMemberlistManager(cfg, logger) + require.NoError(t, err) + + // Start once + err = manager.Start(ctx) + require.NoError(t, err) + defer func() { + _ = 
manager.Shutdown(ctx) + }() + + // Try to start again + err = manager.Start(ctx) + require.Error(t, err) + assert.Contains(t, err.Error(), "already started") + }) + + t.Run("shutdown before start", func(t *testing.T) { + port := findAvailablePort(t) + cfg := DefaultConfig(&Config{ + BindPort: port, + }) + + manager, err := NewMemberlistManager(cfg, logger) + require.NoError(t, err) + + // Shutdown without starting + err = manager.Shutdown(ctx) + require.NoError(t, err) + assert.False(t, manager.IsAlive()) + }) + + t.Run("multiple shutdowns", func(t *testing.T) { + port := findAvailablePort(t) + cfg := DefaultConfig(&Config{ + BindPort: port, + }) + + manager, err := NewMemberlistManager(cfg, logger) + require.NoError(t, err) + + err = manager.Start(ctx) + require.NoError(t, err) + + // First shutdown + err = manager.Shutdown(ctx) + require.NoError(t, err) + + // Second shutdown should not error + err = manager.Shutdown(ctx) + require.NoError(t, err) + }) + + t.Run("start after shutdown", func(t *testing.T) { + port := findAvailablePort(t) + cfg := DefaultConfig(&Config{ + BindPort: port, + }) + + manager, err := NewMemberlistManager(cfg, logger) + require.NoError(t, err) + + err = manager.Start(ctx) + require.NoError(t, err) + + err = manager.Shutdown(ctx) + require.NoError(t, err) + + // Try to start after shutdown + err = manager.Start(ctx) + require.Error(t, err) + assert.Contains(t, err.Error(), "has been shut down") + }) +} + +func TestMemberlistManager_JoinCluster(t *testing.T) { + t.Parallel() + + logger := testhelper.NewLogger(t) + ctx := context.Background() + + t.Run("join with no seeds", func(t *testing.T) { + port := findAvailablePort(t) + cfg := DefaultConfig(&Config{ + BindPort: port, + Seeds: []string{}, // No seeds + }) + + manager, err := NewMemberlistManager(cfg, logger) + require.NoError(t, err) + + // Should start successfully even without seeds + err = manager.Start(ctx) + require.NoError(t, err) + defer func() { + _ = manager.Shutdown(ctx) + 
}() + + assert.True(t, manager.IsAlive()) + assert.Equal(t, 1, manager.NumMembers(), "should only have local node") + }) + + t.Run("join with invalid seeds", func(t *testing.T) { + port := findAvailablePort(t) + cfg := DefaultConfig(&Config{ + BindPort: port, + Seeds: []string{"invalid-host:9999"}, // Non-existent seed + }) + + manager, err := NewMemberlistManager(cfg, logger) + require.NoError(t, err) + + // Should start successfully but log warning about failed join + err = manager.Start(ctx) + require.NoError(t, err) + defer func() { + _ = manager.Shutdown(ctx) + }() + + assert.True(t, manager.IsAlive()) + // Should still work standalone + assert.Equal(t, 1, manager.NumMembers()) + }) + + t.Run("two nodes joining cluster", func(t *testing.T) { + port1 := findAvailablePort(t) + port2 := findAvailablePort(t) + + // Start first node + cfg1 := DefaultConfig(&Config{ + BindPort: port1, + Seeds: []string{}, + }) + + manager1, err := NewMemberlistManager(cfg1, logger) + require.NoError(t, err) + + err = manager1.Start(ctx) + require.NoError(t, err) + defer func() { + _ = manager1.Shutdown(ctx) + }() + + // Start second node with first node as seed + cfg2 := DefaultConfig(&Config{ + BindPort: port2, + Seeds: []string{fmt.Sprintf("127.0.0.1:%d", port1)}, + }) + + manager2, err := NewMemberlistManager(cfg2, logger) + require.NoError(t, err) + + err = manager2.Start(ctx) + require.NoError(t, err) + defer func() { + _ = manager2.Shutdown(ctx) + }() + + // Wait for gossip to propagate + require.Eventually(t, func() bool { + return manager1.NumMembers() == 2 && manager2.NumMembers() == 2 + }, 5*time.Second, 100*time.Millisecond, "both nodes should see each other") + }) +} + +func TestMemberlistManager_WithMetrics(t *testing.T) { + t.Parallel() + + logger := testhelper.NewLogger(t) + ctx := context.Background() + + t.Run("metrics are updated on events", func(t *testing.T) { + port := findAvailablePort(t) + + // Create metrics + metrics := &memberlistMetrics{ + clusterSize: 
prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "test_cluster_size", + }), + nodeJoinTotal: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "test_node_join_total", + }), + nodeLeaveTotal: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "test_node_leave_total", + }), + healthScore: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "test_health_score", + }), + } + + cfg := DefaultConfig(&Config{ + BindPort: port, + }) + + manager, err := NewMemberlistManager(cfg, logger, WithMetrics(metrics)) + require.NoError(t, err) + + err = manager.Start(ctx) + require.NoError(t, err) + defer func() { + _ = manager.Shutdown(ctx) + }() + + // Wait for metrics to be initialized + require.Eventually(t, func() bool { + return testutil.ToFloat64(metrics.clusterSize) == 1 + }, 1*time.Second, 10*time.Millisecond) + + // Verify initial cluster size metric + assert.Equal(t, float64(1), testutil.ToFloat64(metrics.clusterSize)) + + // Verify health score is set (should be 0 for healthy node) + healthScore := testutil.ToFloat64(metrics.healthScore) + assert.GreaterOrEqual(t, healthScore, float64(0)) + }) +} + +func TestMemberlistManager_EventCallbacks(t *testing.T) { + t.Parallel() + + logger := testhelper.NewLogger(t) + ctx := context.Background() + + t.Run("callbacks are invoked on join", func(t *testing.T) { + port1 := findAvailablePort(t) + port2 := findAvailablePort(t) + + // Start first node + cfg1 := DefaultConfig(&Config{ + BindPort: port1, + }) + + manager1, err := NewMemberlistManager(cfg1, logger) + require.NoError(t, err) + + // Register join callback + joinCalled := make(chan string, 1) + manager1.EventDelegate().SetOnNodeJoin(func(node *memberlist.Node) { + select { + case joinCalled <- node.Name: + default: + } + }) + + err = manager1.Start(ctx) + require.NoError(t, err) + defer func() { + _ = manager1.Shutdown(ctx) + }() + + // Start second node + cfg2 := DefaultConfig(&Config{ + BindPort: port2, + Seeds: []string{fmt.Sprintf("127.0.0.1:%d", port1)}, + }) + + 
manager2, err := NewMemberlistManager(cfg2, logger) + require.NoError(t, err) + + err = manager2.Start(ctx) + require.NoError(t, err) + defer func() { + _ = manager2.Shutdown(ctx) + }() + + // Wait for join callback + select { + case nodeName := <-joinCalled: + assert.NotEmpty(t, nodeName, "join callback should receive node name") + case <-time.After(5 * time.Second): + t.Fatal("join callback was not invoked within timeout") + } + }) +} + +func TestMemberlistManager_GetMethods(t *testing.T) { + t.Parallel() + + logger := testhelper.NewLogger(t) + ctx := context.Background() + + port := findAvailablePort(t) + cfg := DefaultConfig(&Config{ + BindPort: port, + }) + + manager, err := NewMemberlistManager(cfg, logger) + require.NoError(t, err) + + // Before start + assert.Nil(t, manager.GetLocalNode()) + assert.Nil(t, manager.GetMembers()) + assert.Equal(t, 0, manager.NumMembers()) + assert.Equal(t, -1, manager.GetHealthScore()) + + // After start + err = manager.Start(ctx) + require.NoError(t, err) + defer func() { + _ = manager.Shutdown(ctx) + }() + + assert.NotNil(t, manager.GetLocalNode()) + assert.NotNil(t, manager.GetMembers()) + assert.Equal(t, 1, manager.NumMembers()) + assert.GreaterOrEqual(t, manager.GetHealthScore(), 0) +} + +// findAvailablePort finds an available port for testing. +func findAvailablePort(t *testing.T) int { + t.Helper() + + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer func() { + _ = listener.Close() + }() + + addr := listener.Addr().(*net.TCPAddr) + return addr.Port +} diff --git a/internal/gitaly/storage/gossip/testhelper_test.go b/internal/gitaly/storage/gossip/testhelper_test.go index 9174da84a71..1cd9f33557e 100644 --- a/internal/gitaly/storage/gossip/testhelper_test.go +++ b/internal/gitaly/storage/gossip/testhelper_test.go @@ -6,6 +6,7 @@ import ( "gitlab.com/gitlab-org/gitaly/v18/internal/testhelper" ) +// TestMain is the entry point for the test suite in the gossip package. 
func TestMain(m *testing.M) { testhelper.Run(m) } -- GitLab From 0fb6b3120b8cf563b3cffd89fb38868b7cdd0e30 Mon Sep 17 00:00:00 2001 From: Quang-Minh Nguyen Date: Thu, 16 Oct 2025 16:12:56 +0700 Subject: [PATCH 3/5] proto: Add GossipRoutingTableUpdate message for routing table gossip Gitaly Raft currently maintains per-partition routing tables in isolation. Each Raft group knows only about its own partition membership. When clients need to route requests to partitions managed by other Raft groups, there's no discovery mechanism. This commit adds the protobuf message foundation for cluster-wide routing table propagation via gossip protocol. The GossipRoutingTableUpdate message captures partition membership state at a specific Raft term/index. It includes partition key, replica list, current term/index for conflict resolution, leader ID, and repository path. The message supports three update types: CREATED for new partitions, UPDATED for membership changes, and REMOVED for deleted partitions. Timestamps enable propagation delay monitoring. The implementation includes serialization helpers with protobuf marshaling, comprehensive validation enforcing required fields and logical consistency, and conflict resolution using term/index comparison matching the existing routing table logic. Message size is limited to 1MB to prevent memory exhaustion while allowing headroom for large replica lists beyond typical 1-10KB updates. 
Part of: https://gitlab.com/gitlab-org/gitaly/-/issues/6907 --- internal/gitaly/storage/gossip/messages.go | 182 +++++++ .../gitaly/storage/gossip/messages_test.go | 463 ++++++++++++++++++ proto/cluster.proto | 46 ++ proto/go/gitalypb/cluster.pb.go | 396 +++++++++++---- 4 files changed, 993 insertions(+), 94 deletions(-) create mode 100644 internal/gitaly/storage/gossip/messages.go create mode 100644 internal/gitaly/storage/gossip/messages_test.go diff --git a/internal/gitaly/storage/gossip/messages.go b/internal/gitaly/storage/gossip/messages.go new file mode 100644 index 00000000000..ba99c869039 --- /dev/null +++ b/internal/gitaly/storage/gossip/messages.go @@ -0,0 +1,182 @@ +package gossip + +import ( + "fmt" + "time" + + "gitlab.com/gitlab-org/gitaly/v18/proto/go/gitalypb" + "google.golang.org/protobuf/proto" +) + +// MaxMessageSize is the maximum size allowed for a GossipRoutingTableUpdate message. +// This is set to 1MB to prevent memory exhaustion and ensure gossip remains efficient. +// Typical routing table updates are much smaller (~1-10KB), but we allow headroom for +// large replica lists. +const MaxMessageSize = 1 * 1024 * 1024 // 1MB + +// SerializeRoutingUpdate serializes a GossipRoutingTableUpdate message to bytes. +// Returns an error if the message is invalid or exceeds size limits. +func SerializeRoutingUpdate(update *gitalypb.GossipRoutingTableUpdate) ([]byte, error) { + if err := ValidateRoutingUpdate(update); err != nil { + return nil, fmt.Errorf("invalid routing update: %w", err) + } + + data, err := proto.Marshal(update) + if err != nil { + return nil, fmt.Errorf("failed to marshal routing update: %w", err) + } + + if len(data) > MaxMessageSize { + return nil, fmt.Errorf("routing update size %d bytes exceeds maximum %d bytes", len(data), MaxMessageSize) + } + + return data, nil +} + +// DeserializeRoutingUpdate deserializes bytes into a GossipRoutingTableUpdate message. 
+// Returns an error if the data is invalid, corrupted, or exceeds size limits. +func DeserializeRoutingUpdate(data []byte) (*gitalypb.GossipRoutingTableUpdate, error) { + if len(data) > MaxMessageSize { + return nil, fmt.Errorf("routing update size %d bytes exceeds maximum %d bytes", len(data), MaxMessageSize) + } + + if len(data) == 0 { + return nil, fmt.Errorf("empty routing update data") + } + + update := &gitalypb.GossipRoutingTableUpdate{} + if err := proto.Unmarshal(data, update); err != nil { + return nil, fmt.Errorf("failed to unmarshal routing update: %w", err) + } + + if err := ValidateRoutingUpdate(update); err != nil { + return nil, fmt.Errorf("invalid routing update after deserialization: %w", err) + } + + return update, nil +} + +// ValidateRoutingUpdate validates a GossipRoutingTableUpdate message. +// It checks for required fields, valid enum values, and logical consistency. +func ValidateRoutingUpdate(update *gitalypb.GossipRoutingTableUpdate) error { + if update == nil { + return fmt.Errorf("routing update is nil") + } + + // Validate partition key + if update.GetPartitionKey() == nil { + return fmt.Errorf("partition_key is required") + } + if update.GetPartitionKey().GetValue() == "" { + return fmt.Errorf("partition_key.value is required") + } + + // Validate update type + if update.GetUpdateType() == gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_UNSPECIFIED { + return fmt.Errorf("update_type must be specified") + } + + // For non-REMOVED updates, validate replica list and relative path + if update.GetUpdateType() != gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_REMOVED { + if len(update.GetReplicas()) == 0 { + return fmt.Errorf("replicas list is required for update_type %s", update.GetUpdateType()) + } + + // Validate each replica + for i, replica := range update.GetReplicas() { + if err := validateReplica(replica); err != nil { + return fmt.Errorf("invalid replica at index %d: %w", i, err) + } + } + + if update.GetRelativePath() == "" { + 
return fmt.Errorf("relative_path is required for update_type %s", update.GetUpdateType()) + } + } + + // Validate term and index (0 is valid for initial state) + // No validation needed as uint64 cannot be negative + + // Validate leader_id if present (0 means no leader, which is valid) + // Only validate leader for non-REMOVED updates since REMOVED can have empty replicas + if update.GetLeaderId() != 0 && update.GetUpdateType() != gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_REMOVED { + // Verify leader_id exists in replicas list + found := false + for _, replica := range update.GetReplicas() { + if replica.GetMemberId() == update.GetLeaderId() { + found = true + break + } + } + if !found { + return fmt.Errorf("leader_id %d not found in replicas list", update.GetLeaderId()) + } + } + + return nil +} + +// validateReplica validates a ReplicaID message. +func validateReplica(replica *gitalypb.ReplicaID) error { + if replica == nil { + return fmt.Errorf("replica is nil") + } + + if replica.GetPartitionKey() == nil { + return fmt.Errorf("partition_key is required") + } + if replica.GetPartitionKey().GetValue() == "" { + return fmt.Errorf("partition_key.value is required") + } + + if replica.GetMemberId() == 0 { + return fmt.Errorf("member_id is required") + } + + if replica.GetStorageName() == "" { + return fmt.Errorf("storage_name is required") + } + + // Metadata is optional (may not be present for learners or pending nodes) + // Address can be empty for nodes that haven't joined yet + + // Validate replica type + if replica.GetType() == gitalypb.ReplicaID_REPLICA_TYPE_UNSPECIFIED { + return fmt.Errorf("replica type must be specified") + } + + return nil +} + +// NewRoutingUpdate creates a new GossipRoutingTableUpdate with current timestamp. +// This is a helper function to ensure consistent message creation across the codebase. 
+func NewRoutingUpdate( + partitionKey *gitalypb.RaftPartitionKey, + replicas []*gitalypb.ReplicaID, + term, index, leaderID uint64, + relativePath string, + updateType gitalypb.GossipRoutingTableUpdate_UpdateType, +) *gitalypb.GossipRoutingTableUpdate { + return &gitalypb.GossipRoutingTableUpdate{ + PartitionKey: partitionKey, + Replicas: replicas, + Term: term, + Index: index, + LeaderId: leaderID, + RelativePath: relativePath, + UpdateType: updateType, + TimestampNanos: time.Now().UnixNano(), + } +} + +// IsNewer returns true if this update is newer than the other update based on term/index comparison. +// This implements the same conflict resolution logic as routing_table.go:90-95. +func IsNewer(update, other *gitalypb.GossipRoutingTableUpdate) bool { + if update.GetTerm() > other.GetTerm() { + return true + } + if update.GetTerm() == other.GetTerm() && update.GetIndex() > other.GetIndex() { + return true + } + return false +} diff --git a/internal/gitaly/storage/gossip/messages_test.go b/internal/gitaly/storage/gossip/messages_test.go new file mode 100644 index 00000000000..11d0e80556f --- /dev/null +++ b/internal/gitaly/storage/gossip/messages_test.go @@ -0,0 +1,463 @@ +package gossip + +import ( + "bytes" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "gitlab.com/gitlab-org/gitaly/v18/proto/go/gitalypb" +) + +func TestSerializeRoutingUpdate(t *testing.T) { + t.Parallel() + + t.Run("valid update", func(t *testing.T) { + update := createValidUpdate() + data, err := SerializeRoutingUpdate(update) + require.NoError(t, err) + require.NotEmpty(t, data) + require.Less(t, len(data), MaxMessageSize) + }) + + t.Run("nil update", func(t *testing.T) { + _, err := SerializeRoutingUpdate(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "routing update is nil") + }) + + t.Run("invalid update - missing partition key", func(t *testing.T) { + update := createValidUpdate() + update.PartitionKey = nil + _, err := 
SerializeRoutingUpdate(update) + require.Error(t, err) + require.Contains(t, err.Error(), "partition_key is required") + }) + + t.Run("invalid update - unspecified type", func(t *testing.T) { + update := createValidUpdate() + update.UpdateType = gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_UNSPECIFIED + _, err := SerializeRoutingUpdate(update) + require.Error(t, err) + require.Contains(t, err.Error(), "update_type must be specified") + }) +} + +func TestDeserializeRoutingUpdate(t *testing.T) { + t.Parallel() + + t.Run("valid serialized data", func(t *testing.T) { + original := createValidUpdate() + data, err := SerializeRoutingUpdate(original) + require.NoError(t, err) + + deserialized, err := DeserializeRoutingUpdate(data) + require.NoError(t, err) + require.NotNil(t, deserialized) + require.Equal(t, original.GetPartitionKey().GetValue(), deserialized.GetPartitionKey().GetValue()) + require.Equal(t, original.GetTerm(), deserialized.GetTerm()) + require.Equal(t, original.GetIndex(), deserialized.GetIndex()) + require.Equal(t, original.GetLeaderId(), deserialized.GetLeaderId()) + require.Equal(t, original.GetRelativePath(), deserialized.GetRelativePath()) + require.Equal(t, original.GetUpdateType(), deserialized.GetUpdateType()) + require.Len(t, deserialized.GetReplicas(), len(original.GetReplicas())) + }) + + t.Run("empty data", func(t *testing.T) { + _, err := DeserializeRoutingUpdate([]byte{}) + require.Error(t, err) + require.Contains(t, err.Error(), "empty routing update data") + }) + + t.Run("corrupted data", func(t *testing.T) { + _, err := DeserializeRoutingUpdate([]byte{0x00, 0x01, 0x02, 0xFF, 0xFF}) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to unmarshal routing update") + }) + + t.Run("data exceeds max size", func(t *testing.T) { + largeData := make([]byte, MaxMessageSize+1) + _, err := DeserializeRoutingUpdate(largeData) + require.Error(t, err) + require.Contains(t, err.Error(), "exceeds maximum") + }) + + t.Run("deserialized 
invalid message", func(t *testing.T) { + // Serialize an invalid update by bypassing validation + update := &gitalypb.GossipRoutingTableUpdate{ + // Missing required fields + UpdateType: gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_CREATED, + } + data, err := SerializeRoutingUpdate(update) + require.Error(t, err) // Should fail validation + require.Nil(t, data) + }) +} + +func TestValidateRoutingUpdate(t *testing.T) { + t.Parallel() + + t.Run("valid update - CREATED", func(t *testing.T) { + update := createValidUpdate() + update.UpdateType = gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_CREATED + require.NoError(t, ValidateRoutingUpdate(update)) + }) + + t.Run("valid update - UPDATED", func(t *testing.T) { + update := createValidUpdate() + update.UpdateType = gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_UPDATED + require.NoError(t, ValidateRoutingUpdate(update)) + }) + + t.Run("valid update - REMOVED", func(t *testing.T) { + update := createValidUpdate() + update.UpdateType = gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_REMOVED + update.Replicas = nil // Replicas not required for REMOVED + update.RelativePath = "" // Path not required for REMOVED + update.LeaderId = 0 // Leader ID should be 0 for REMOVED + require.NoError(t, ValidateRoutingUpdate(update)) + }) + + t.Run("nil update", func(t *testing.T) { + err := ValidateRoutingUpdate(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "routing update is nil") + }) + + t.Run("missing partition key", func(t *testing.T) { + update := createValidUpdate() + update.PartitionKey = nil + err := ValidateRoutingUpdate(update) + require.Error(t, err) + require.Contains(t, err.Error(), "partition_key is required") + }) + + t.Run("empty partition key value", func(t *testing.T) { + update := createValidUpdate() + update.PartitionKey.Value = "" + err := ValidateRoutingUpdate(update) + require.Error(t, err) + require.Contains(t, err.Error(), "partition_key.value is required") + }) + + t.Run("unspecified update 
type", func(t *testing.T) { + update := createValidUpdate() + update.UpdateType = gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_UNSPECIFIED + err := ValidateRoutingUpdate(update) + require.Error(t, err) + require.Contains(t, err.Error(), "update_type must be specified") + }) + + t.Run("empty replicas for CREATED", func(t *testing.T) { + update := createValidUpdate() + update.UpdateType = gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_CREATED + update.Replicas = nil + err := ValidateRoutingUpdate(update) + require.Error(t, err) + require.Contains(t, err.Error(), "replicas list is required") + }) + + t.Run("empty relative path for CREATED", func(t *testing.T) { + update := createValidUpdate() + update.UpdateType = gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_CREATED + update.RelativePath = "" + err := ValidateRoutingUpdate(update) + require.Error(t, err) + require.Contains(t, err.Error(), "relative_path is required") + }) + + t.Run("leader not in replicas list", func(t *testing.T) { + update := createValidUpdate() + update.LeaderId = 9999 // Non-existent member ID + err := ValidateRoutingUpdate(update) + require.Error(t, err) + require.Contains(t, err.Error(), "leader_id 9999 not found in replicas list") + }) + + t.Run("invalid replica", func(t *testing.T) { + update := createValidUpdate() + update.Replicas[0].MemberId = 0 // Invalid member ID + err := ValidateRoutingUpdate(update) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid replica") + require.Contains(t, err.Error(), "member_id is required") + }) +} + +func TestValidateReplica(t *testing.T) { + t.Parallel() + + t.Run("valid replica", func(t *testing.T) { + replica := createValidReplica(1) + require.NoError(t, validateReplica(replica)) + }) + + t.Run("nil replica", func(t *testing.T) { + err := validateReplica(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "replica is nil") + }) + + t.Run("missing partition key", func(t *testing.T) { + replica := createValidReplica(1) + 
replica.PartitionKey = nil + err := validateReplica(replica) + require.Error(t, err) + require.Contains(t, err.Error(), "partition_key is required") + }) + + t.Run("empty partition key value", func(t *testing.T) { + replica := createValidReplica(1) + replica.PartitionKey.Value = "" + err := validateReplica(replica) + require.Error(t, err) + require.Contains(t, err.Error(), "partition_key.value is required") + }) + + t.Run("zero member ID", func(t *testing.T) { + replica := createValidReplica(1) + replica.MemberId = 0 + err := validateReplica(replica) + require.Error(t, err) + require.Contains(t, err.Error(), "member_id is required") + }) + + t.Run("empty storage name", func(t *testing.T) { + replica := createValidReplica(1) + replica.StorageName = "" + err := validateReplica(replica) + require.Error(t, err) + require.Contains(t, err.Error(), "storage_name is required") + }) + + t.Run("unspecified replica type", func(t *testing.T) { + replica := createValidReplica(1) + replica.Type = gitalypb.ReplicaID_REPLICA_TYPE_UNSPECIFIED + err := validateReplica(replica) + require.Error(t, err) + require.Contains(t, err.Error(), "replica type must be specified") + }) + + t.Run("empty metadata address is allowed", func(t *testing.T) { + replica := createValidReplica(1) + replica.Metadata = &gitalypb.ReplicaID_Metadata{Address: ""} + // Should not error - empty address is allowed for nodes not yet joined + require.NoError(t, validateReplica(replica)) + }) +} + +func TestNewRoutingUpdate(t *testing.T) { + t.Parallel() + + partitionKey := &gitalypb.RaftPartitionKey{Value: "partition-123"} + replicas := []*gitalypb.ReplicaID{createValidReplica(1), createValidReplica(2)} + term := uint64(5) + index := uint64(100) + leaderID := uint64(1) + relativePath := "@hashed/ab/cd/abcd1234.git" + updateType := gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_CREATED + + before := time.Now().UnixNano() + update := NewRoutingUpdate(partitionKey, replicas, term, index, leaderID, relativePath, 
updateType) + after := time.Now().UnixNano() + + require.NotNil(t, update) + require.Equal(t, partitionKey, update.GetPartitionKey()) + require.Equal(t, replicas, update.GetReplicas()) + require.Equal(t, term, update.GetTerm()) + require.Equal(t, index, update.GetIndex()) + require.Equal(t, leaderID, update.GetLeaderId()) + require.Equal(t, relativePath, update.GetRelativePath()) + require.Equal(t, updateType, update.GetUpdateType()) + require.GreaterOrEqual(t, update.GetTimestampNanos(), before) + require.LessOrEqual(t, update.GetTimestampNanos(), after) +} + +func TestIsNewer(t *testing.T) { + t.Parallel() + + base := createValidUpdate() + base.Term = 5 + base.Index = 100 + + t.Run("higher term is newer", func(t *testing.T) { + newer := createValidUpdate() + newer.Term = 6 + newer.Index = 50 // Even with lower index + + require.True(t, IsNewer(newer, base)) + require.False(t, IsNewer(base, newer)) + }) + + t.Run("same term, higher index is newer", func(t *testing.T) { + newer := createValidUpdate() + newer.Term = 5 + newer.Index = 150 + + require.True(t, IsNewer(newer, base)) + require.False(t, IsNewer(base, newer)) + }) + + t.Run("same term, same index is not newer", func(t *testing.T) { + same := createValidUpdate() + same.Term = 5 + same.Index = 100 + + require.False(t, IsNewer(same, base)) + require.False(t, IsNewer(base, same)) + }) + + t.Run("same term, lower index is not newer", func(t *testing.T) { + older := createValidUpdate() + older.Term = 5 + older.Index = 50 + + require.False(t, IsNewer(older, base)) + require.True(t, IsNewer(base, older)) + }) + + t.Run("lower term is not newer", func(t *testing.T) { + older := createValidUpdate() + older.Term = 4 + older.Index = 200 // Even with higher index + + require.False(t, IsNewer(older, base)) + require.True(t, IsNewer(base, older)) + }) +} + +func TestMaxMessageSize(t *testing.T) { + t.Parallel() + + t.Run("serialize large update with many replicas", func(t *testing.T) { + update := createValidUpdate() + + 
// Add many replicas to approach size limit + // Each replica is roughly 100-200 bytes + for i := 0; i < 1000; i++ { + replica := createValidReplica(uint64(i + 10)) + // Make storage names longer to increase size + replica.StorageName = strings.Repeat("storage-", 10) + string(rune(i)) + update.Replicas = append(update.Replicas, replica) + } + + data, err := SerializeRoutingUpdate(update) + require.NoError(t, err) + require.Less(t, len(data), MaxMessageSize) + }) + + t.Run("reject oversized message", func(t *testing.T) { + // Create a message that would exceed the limit + // This is a theoretical test - in practice, we'd need to construct + // a message that's actually larger than 1MB + data := make([]byte, MaxMessageSize+1) + _, err := DeserializeRoutingUpdate(data) + require.Error(t, err) + require.Contains(t, err.Error(), "exceeds maximum") + }) +} + +func TestSerializationRoundTrip(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + update *gitalypb.GossipRoutingTableUpdate + }{ + { + name: "single replica", + update: createValidUpdate(), + }, + { + name: "multiple replicas", + update: func() *gitalypb.GossipRoutingTableUpdate { + u := createValidUpdate() + u.Replicas = append(u.Replicas, createValidReplica(2), createValidReplica(3)) + return u + }(), + }, + { + name: "no leader", + update: func() *gitalypb.GossipRoutingTableUpdate { + u := createValidUpdate() + u.LeaderId = 0 + return u + }(), + }, + { + name: "REMOVED type", + update: func() *gitalypb.GossipRoutingTableUpdate { + u := createValidUpdate() + u.UpdateType = gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_REMOVED + u.Replicas = nil + u.RelativePath = "" + return u + }(), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Serialize + data, err := SerializeRoutingUpdate(tc.update) + require.NoError(t, err) + + // Deserialize + deserialized, err := DeserializeRoutingUpdate(data) + require.NoError(t, err) + + // Compare + require.Equal(t, 
tc.update.GetPartitionKey().GetValue(), deserialized.GetPartitionKey().GetValue()) + require.Equal(t, tc.update.GetTerm(), deserialized.GetTerm()) + require.Equal(t, tc.update.GetIndex(), deserialized.GetIndex()) + require.Equal(t, tc.update.GetLeaderId(), deserialized.GetLeaderId()) + require.Equal(t, tc.update.GetRelativePath(), deserialized.GetRelativePath()) + require.Equal(t, tc.update.GetUpdateType(), deserialized.GetUpdateType()) + require.Len(t, deserialized.GetReplicas(), len(tc.update.GetReplicas())) + }) + } +} + +func TestSerializationStability(t *testing.T) { + t.Parallel() + + update := createValidUpdate() + + // Serialize the same update multiple times + data1, err := SerializeRoutingUpdate(update) + require.NoError(t, err) + + data2, err := SerializeRoutingUpdate(update) + require.NoError(t, err) + + // Should produce identical bytes (protobuf is deterministic for same input) + require.True(t, bytes.Equal(data1, data2)) +} + +// Helper functions + +func createValidUpdate() *gitalypb.GossipRoutingTableUpdate { + return &gitalypb.GossipRoutingTableUpdate{ + PartitionKey: &gitalypb.RaftPartitionKey{Value: "partition-abc123"}, + Replicas: []*gitalypb.ReplicaID{createValidReplica(1)}, + Term: 10, + Index: 1000, + LeaderId: 1, + RelativePath: "@hashed/ab/cd/abcd1234.git", + UpdateType: gitalypb.GossipRoutingTableUpdate_UPDATE_TYPE_CREATED, + TimestampNanos: time.Now().UnixNano(), + } +} + +func createValidReplica(memberID uint64) *gitalypb.ReplicaID { + return &gitalypb.ReplicaID{ + PartitionKey: &gitalypb.RaftPartitionKey{Value: "partition-abc123"}, + MemberId: memberID, + StorageName: "default", + Metadata: &gitalypb.ReplicaID_Metadata{ + Address: "gitaly-1.example.com:8075", + }, + Type: gitalypb.ReplicaID_REPLICA_TYPE_VOTER, + } +} diff --git a/proto/cluster.proto b/proto/cluster.proto index 1af29fc1302..2a800e52844 100644 --- a/proto/cluster.proto +++ b/proto/cluster.proto @@ -264,6 +264,52 @@ message RaftClusterInfoResponse { ClusterStatistics 
statistics = 2; } +// GossipRoutingTableUpdate represents a routing table update propagated via gossip protocol. +// This message is used to broadcast partition routing information across all Gitaly nodes +// in a Raft cluster, enabling cluster-wide discovery of partitions regardless of which +// Raft group manages them. +message GossipRoutingTableUpdate { + // UpdateType indicates the nature of the routing table change. + enum UpdateType { + // UPDATE_TYPE_UNSPECIFIED is the default value and should not be used. + UPDATE_TYPE_UNSPECIFIED = 0; + // UPDATE_TYPE_CREATED indicates a new partition was created. + UPDATE_TYPE_CREATED = 1; + // UPDATE_TYPE_UPDATED indicates an existing partition's membership changed. + UPDATE_TYPE_UPDATED = 2; + // UPDATE_TYPE_REMOVED indicates a partition was removed. + UPDATE_TYPE_REMOVED = 3; + } + + // partition_key uniquely identifies the partition being updated. + RaftPartitionKey partition_key = 1; + + // replicas contains the current list of all replicas in the partition. + // Empty for UPDATE_TYPE_REMOVED. + repeated ReplicaID replicas = 2; + + // term is the current Raft term for this partition. + // Used for conflict resolution when merging updates. + uint64 term = 3; + + // index is the current Raft log index for this partition. + // Used for conflict resolution when merging updates. + uint64 index = 4; + + // leader_id is the member ID of the current leader (0 if no leader). + uint64 leader_id = 5; + + // relative_path is the repository path within the storage. + string relative_path = 6; + + // update_type indicates the type of update being propagated. + UpdateType update_type = 7; + + // timestamp_nanos is the Unix timestamp (in nanoseconds) when this update was created. + // Used for debugging and monitoring propagation delays. + int64 timestamp_nanos = 8; +} + // RaftService manages the sending of Raft messages to peers. 
service RaftService { // SendMessage processes Raft messages and ensures they are handled by diff --git a/proto/go/gitalypb/cluster.pb.go b/proto/go/gitalypb/cluster.pb.go index 63c345ac4b3..c35c7765ec9 100644 --- a/proto/go/gitalypb/cluster.pb.go +++ b/proto/go/gitalypb/cluster.pb.go @@ -75,6 +75,63 @@ func (ReplicaID_ReplicaType) EnumDescriptor() ([]byte, []int) { return file_cluster_proto_rawDescGZIP(), []int{2, 0} } +// UpdateType indicates the nature of the routing table change. +type GossipRoutingTableUpdate_UpdateType int32 + +const ( + // UPDATE_TYPE_UNSPECIFIED is the default value and should not be used. + GossipRoutingTableUpdate_UPDATE_TYPE_UNSPECIFIED GossipRoutingTableUpdate_UpdateType = 0 + // UPDATE_TYPE_CREATED indicates a new partition was created. + GossipRoutingTableUpdate_UPDATE_TYPE_CREATED GossipRoutingTableUpdate_UpdateType = 1 + // UPDATE_TYPE_UPDATED indicates an existing partition's membership changed. + GossipRoutingTableUpdate_UPDATE_TYPE_UPDATED GossipRoutingTableUpdate_UpdateType = 2 + // UPDATE_TYPE_REMOVED indicates a partition was removed. + GossipRoutingTableUpdate_UPDATE_TYPE_REMOVED GossipRoutingTableUpdate_UpdateType = 3 +) + +// Enum value maps for GossipRoutingTableUpdate_UpdateType. 
+var ( + GossipRoutingTableUpdate_UpdateType_name = map[int32]string{ + 0: "UPDATE_TYPE_UNSPECIFIED", + 1: "UPDATE_TYPE_CREATED", + 2: "UPDATE_TYPE_UPDATED", + 3: "UPDATE_TYPE_REMOVED", + } + GossipRoutingTableUpdate_UpdateType_value = map[string]int32{ + "UPDATE_TYPE_UNSPECIFIED": 0, + "UPDATE_TYPE_CREATED": 1, + "UPDATE_TYPE_UPDATED": 2, + "UPDATE_TYPE_REMOVED": 3, + } +) + +func (x GossipRoutingTableUpdate_UpdateType) Enum() *GossipRoutingTableUpdate_UpdateType { + p := new(GossipRoutingTableUpdate_UpdateType) + *p = x + return p +} + +func (x GossipRoutingTableUpdate_UpdateType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GossipRoutingTableUpdate_UpdateType) Descriptor() protoreflect.EnumDescriptor { + return file_cluster_proto_enumTypes[1].Descriptor() +} + +func (GossipRoutingTableUpdate_UpdateType) Type() protoreflect.EnumType { + return &file_cluster_proto_enumTypes[1] +} + +func (x GossipRoutingTableUpdate_UpdateType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GossipRoutingTableUpdate_UpdateType.Descriptor instead. +func (GossipRoutingTableUpdate_UpdateType) EnumDescriptor() ([]byte, []int) { + return file_cluster_proto_rawDescGZIP(), []int{14, 0} +} + // RaftEntry encapsulates critical data for replication using etcd/raft library. // It has a request ID allowing the primary to track when the action is // effectively applied. @@ -1055,6 +1112,122 @@ func (x *RaftClusterInfoResponse) GetStatistics() *ClusterStatistics { return nil } +// GossipRoutingTableUpdate represents a routing table update propagated via gossip protocol. +// This message is used to broadcast partition routing information across all Gitaly nodes +// in a Raft cluster, enabling cluster-wide discovery of partitions regardless of which +// Raft group manages them. 
+type GossipRoutingTableUpdate struct { + state protoimpl.MessageState `protogen:"open.v1"` + // partition_key uniquely identifies the partition being updated. + PartitionKey *RaftPartitionKey `protobuf:"bytes,1,opt,name=partition_key,json=partitionKey,proto3" json:"partition_key,omitempty"` + // replicas contains the current list of all replicas in the partition. + // Empty for UPDATE_TYPE_REMOVED. + Replicas []*ReplicaID `protobuf:"bytes,2,rep,name=replicas,proto3" json:"replicas,omitempty"` + // term is the current Raft term for this partition. + // Used for conflict resolution when merging updates. + Term uint64 `protobuf:"varint,3,opt,name=term,proto3" json:"term,omitempty"` + // index is the current Raft log index for this partition. + // Used for conflict resolution when merging updates. + Index uint64 `protobuf:"varint,4,opt,name=index,proto3" json:"index,omitempty"` + // leader_id is the member ID of the current leader (0 if no leader). + LeaderId uint64 `protobuf:"varint,5,opt,name=leader_id,json=leaderId,proto3" json:"leader_id,omitempty"` + // relative_path is the repository path within the storage. + RelativePath string `protobuf:"bytes,6,opt,name=relative_path,json=relativePath,proto3" json:"relative_path,omitempty"` + // update_type indicates the type of update being propagated. + UpdateType GossipRoutingTableUpdate_UpdateType `protobuf:"varint,7,opt,name=update_type,json=updateType,proto3,enum=gitaly.GossipRoutingTableUpdate_UpdateType" json:"update_type,omitempty"` + // timestamp_nanos is the Unix timestamp (in nanoseconds) when this update was created. + // Used for debugging and monitoring propagation delays. 
+ TimestampNanos int64 `protobuf:"varint,8,opt,name=timestamp_nanos,json=timestampNanos,proto3" json:"timestamp_nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GossipRoutingTableUpdate) Reset() { + *x = GossipRoutingTableUpdate{} + mi := &file_cluster_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GossipRoutingTableUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GossipRoutingTableUpdate) ProtoMessage() {} + +func (x *GossipRoutingTableUpdate) ProtoReflect() protoreflect.Message { + mi := &file_cluster_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GossipRoutingTableUpdate.ProtoReflect.Descriptor instead. +func (*GossipRoutingTableUpdate) Descriptor() ([]byte, []int) { + return file_cluster_proto_rawDescGZIP(), []int{14} +} + +func (x *GossipRoutingTableUpdate) GetPartitionKey() *RaftPartitionKey { + if x != nil { + return x.PartitionKey + } + return nil +} + +func (x *GossipRoutingTableUpdate) GetReplicas() []*ReplicaID { + if x != nil { + return x.Replicas + } + return nil +} + +func (x *GossipRoutingTableUpdate) GetTerm() uint64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *GossipRoutingTableUpdate) GetIndex() uint64 { + if x != nil { + return x.Index + } + return 0 +} + +func (x *GossipRoutingTableUpdate) GetLeaderId() uint64 { + if x != nil { + return x.LeaderId + } + return 0 +} + +func (x *GossipRoutingTableUpdate) GetRelativePath() string { + if x != nil { + return x.RelativePath + } + return "" +} + +func (x *GossipRoutingTableUpdate) GetUpdateType() GossipRoutingTableUpdate_UpdateType { + if x != nil { + return x.UpdateType + } + return GossipRoutingTableUpdate_UPDATE_TYPE_UNSPECIFIED +} + +func (x 
*GossipRoutingTableUpdate) GetTimestampNanos() int64 { + if x != nil { + return x.TimestampNanos + } + return 0 +} + // LogData contains serialized log data, including the log entry itself // and all attached files in the log entry's directory. These data are // exchanged at the Transport layer before sending and after receiving @@ -1076,7 +1249,7 @@ type RaftEntry_LogData struct { func (x *RaftEntry_LogData) Reset() { *x = RaftEntry_LogData{} - mi := &file_cluster_proto_msgTypes[14] + mi := &file_cluster_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1088,7 +1261,7 @@ func (x *RaftEntry_LogData) String() string { func (*RaftEntry_LogData) ProtoMessage() {} func (x *RaftEntry_LogData) ProtoReflect() protoreflect.Message { - mi := &file_cluster_proto_msgTypes[14] + mi := &file_cluster_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1129,7 +1302,7 @@ type ReplicaID_Metadata struct { func (x *ReplicaID_Metadata) Reset() { *x = ReplicaID_Metadata{} - mi := &file_cluster_proto_msgTypes[15] + mi := &file_cluster_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1141,7 +1314,7 @@ func (x *ReplicaID_Metadata) String() string { func (*ReplicaID_Metadata) ProtoMessage() {} func (x *ReplicaID_Metadata) ProtoReflect() protoreflect.Message { - mi := &file_cluster_proto_msgTypes[15] + mi := &file_cluster_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1177,7 +1350,7 @@ type ClusterStatistics_StorageStats struct { func (x *ClusterStatistics_StorageStats) Reset() { *x = ClusterStatistics_StorageStats{} - mi := &file_cluster_proto_msgTypes[16] + mi := &file_cluster_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1189,7 +1362,7 @@ func (x 
*ClusterStatistics_StorageStats) String() string { func (*ClusterStatistics_StorageStats) ProtoMessage() {} func (x *ClusterStatistics_StorageStats) ProtoReflect() protoreflect.Message { - mi := &file_cluster_proto_msgTypes[16] + mi := &file_cluster_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1244,7 +1417,7 @@ type GetPartitionsResponse_ReplicaStatus struct { func (x *GetPartitionsResponse_ReplicaStatus) Reset() { *x = GetPartitionsResponse_ReplicaStatus{} - mi := &file_cluster_proto_msgTypes[18] + mi := &file_cluster_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1256,7 +1429,7 @@ func (x *GetPartitionsResponse_ReplicaStatus) String() string { func (*GetPartitionsResponse_ReplicaStatus) ProtoMessage() {} func (x *GetPartitionsResponse_ReplicaStatus) ProtoReflect() protoreflect.Message { - mi := &file_cluster_proto_msgTypes[18] + mi := &file_cluster_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1495,40 +1668,70 @@ var file_cluster_proto_rawDesc = string([]byte{ 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x69, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x32, 0xcf, 0x03, 0x0a, 0x0b, 0x52, 0x61, 0x66, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x52, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x12, 0x1a, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x52, 0x61, 0x66, - 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1b, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x08, 0xfa, 0x97, - 0x28, 0x04, 0x08, 0x01, 0x10, 0x02, 0x28, 0x01, 0x12, 0x63, 0x0a, 0x0c, 0x53, 0x65, 0x6e, 0x64, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x22, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, - 0x79, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, + 0x74, 0x69, 0x63, 0x73, 0x22, 0xe1, 0x03, 0x0a, 0x18, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x52, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x12, 0x3d, 0x0a, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, + 0x79, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, + 0x65, 0x79, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, + 0x12, 0x2d, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x49, 0x44, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, + 0x65, 0x72, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, + 0x65, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x50, 0x61, 0x74, 
0x68, 0x12, 0x4c, 0x0a, 0x0b, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x2b, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4e, 0x61, 0x6e, + 0x6f, 0x73, 0x22, 0x74, 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, + 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, + 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x03, 0x32, 0xcf, 0x03, 0x0a, 0x0b, 0x52, 0x61, 0x66, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x52, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, + 0x2e, 0x52, 0x61, 0x66, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x52, 0x61, 0x66, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 
0x22, 0x08, 0xfa, 0x97, 0x28, 0x04, 0x08, 0x01, 0x10, 0x02, 0x28, 0x01, 0x12, 0x63, 0x0a, 0x0c, + 0x53, 0x65, 0x6e, 0x64, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x22, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x08, 0xfa, 0x97, 0x28, 0x04, 0x08, 0x01, 0x10, 0x02, 0x28, 0x01, 0x12, 0x50, 0x0a, - 0x0b, 0x4a, 0x6f, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x2e, 0x67, + 0x6f, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x23, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x08, 0xfa, 0x97, 0x28, 0x04, 0x08, 0x01, 0x10, 0x02, 0x28, + 0x01, 0x12, 0x50, 0x0a, 0x0b, 0x4a, 0x6f, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x1a, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x4a, 0x6f, 0x69, 0x6e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x4a, 0x6f, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, - 0x79, 0x2e, 0x4a, 0x6f, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x08, 0xfa, 0x97, 0x28, 0x04, 0x08, 0x01, 0x10, 0x02, 0x12, - 0x58, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x1c, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x61, 0x72, - 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, - 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 
0x2e, 0x47, 0x65, 0x74, 0x50, 0x61, 0x72, 0x74, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x08, 0xfa, - 0x97, 0x28, 0x04, 0x08, 0x02, 0x10, 0x02, 0x30, 0x01, 0x12, 0x5b, 0x0a, 0x0e, 0x47, 0x65, 0x74, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1e, 0x2e, 0x67, 0x69, - 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x69, - 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x08, 0xfa, 0x97, - 0x28, 0x04, 0x08, 0x02, 0x10, 0x02, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2d, 0x6f, 0x72, 0x67, 0x2f, - 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2f, 0x76, 0x31, 0x38, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x67, 0x6f, 0x2f, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x08, 0xfa, 0x97, 0x28, 0x04, 0x08, + 0x01, 0x10, 0x02, 0x12, 0x58, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1c, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x47, 0x65, + 0x74, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x08, 0xfa, 0x97, 0x28, 0x04, 0x08, 0x02, 0x10, 0x02, 0x30, 0x01, 0x12, 0x5b, 0x0a, + 0x0e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x1e, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 
0x52, 0x61, 0x66, 0x74, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1f, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x08, 0xfa, 0x97, 0x28, 0x04, 0x08, 0x02, 0x10, 0x02, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, + 0x74, 0x6c, 0x61, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2d, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2f, 0x76, 0x31, 0x38, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x70, 0x62, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, }) var ( @@ -1543,63 +1746,68 @@ func file_cluster_proto_rawDescGZIP() []byte { return file_cluster_proto_rawDescData } -var file_cluster_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_cluster_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_cluster_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_cluster_proto_msgTypes = make([]protoimpl.MessageInfo, 20) var file_cluster_proto_goTypes = []any{ (ReplicaID_ReplicaType)(0), // 0: gitaly.ReplicaID.ReplicaType - (*RaftEntry)(nil), // 1: gitaly.RaftEntry - (*RaftPartitionKey)(nil), // 2: gitaly.RaftPartitionKey - (*ReplicaID)(nil), // 3: gitaly.ReplicaID - (*RaftMessageRequest)(nil), // 4: gitaly.RaftMessageRequest - (*RaftMessageResponse)(nil), // 5: gitaly.RaftMessageResponse - (*RaftSnapshotMessageRequest)(nil), // 6: gitaly.RaftSnapshotMessageRequest - (*RaftSnapshotMessageResponse)(nil), // 7: gitaly.RaftSnapshotMessageResponse - (*JoinClusterRequest)(nil), // 8: gitaly.JoinClusterRequest - (*JoinClusterResponse)(nil), // 9: gitaly.JoinClusterResponse - (*GetPartitionsRequest)(nil), // 10: gitaly.GetPartitionsRequest - (*ClusterStatistics)(nil), // 11: gitaly.ClusterStatistics - (*GetPartitionsResponse)(nil), // 12: 
gitaly.GetPartitionsResponse - (*RaftClusterInfoRequest)(nil), // 13: gitaly.RaftClusterInfoRequest - (*RaftClusterInfoResponse)(nil), // 14: gitaly.RaftClusterInfoResponse - (*RaftEntry_LogData)(nil), // 15: gitaly.RaftEntry.LogData - (*ReplicaID_Metadata)(nil), // 16: gitaly.ReplicaID.Metadata - (*ClusterStatistics_StorageStats)(nil), // 17: gitaly.ClusterStatistics.StorageStats - nil, // 18: gitaly.ClusterStatistics.StorageStatsEntry - (*GetPartitionsResponse_ReplicaStatus)(nil), // 19: gitaly.GetPartitionsResponse.ReplicaStatus - (*raftpb.Message)(nil), // 20: raftpb.Message + (GossipRoutingTableUpdate_UpdateType)(0), // 1: gitaly.GossipRoutingTableUpdate.UpdateType + (*RaftEntry)(nil), // 2: gitaly.RaftEntry + (*RaftPartitionKey)(nil), // 3: gitaly.RaftPartitionKey + (*ReplicaID)(nil), // 4: gitaly.ReplicaID + (*RaftMessageRequest)(nil), // 5: gitaly.RaftMessageRequest + (*RaftMessageResponse)(nil), // 6: gitaly.RaftMessageResponse + (*RaftSnapshotMessageRequest)(nil), // 7: gitaly.RaftSnapshotMessageRequest + (*RaftSnapshotMessageResponse)(nil), // 8: gitaly.RaftSnapshotMessageResponse + (*JoinClusterRequest)(nil), // 9: gitaly.JoinClusterRequest + (*JoinClusterResponse)(nil), // 10: gitaly.JoinClusterResponse + (*GetPartitionsRequest)(nil), // 11: gitaly.GetPartitionsRequest + (*ClusterStatistics)(nil), // 12: gitaly.ClusterStatistics + (*GetPartitionsResponse)(nil), // 13: gitaly.GetPartitionsResponse + (*RaftClusterInfoRequest)(nil), // 14: gitaly.RaftClusterInfoRequest + (*RaftClusterInfoResponse)(nil), // 15: gitaly.RaftClusterInfoResponse + (*GossipRoutingTableUpdate)(nil), // 16: gitaly.GossipRoutingTableUpdate + (*RaftEntry_LogData)(nil), // 17: gitaly.RaftEntry.LogData + (*ReplicaID_Metadata)(nil), // 18: gitaly.ReplicaID.Metadata + (*ClusterStatistics_StorageStats)(nil), // 19: gitaly.ClusterStatistics.StorageStats + nil, // 20: gitaly.ClusterStatistics.StorageStatsEntry + (*GetPartitionsResponse_ReplicaStatus)(nil), // 21: 
gitaly.GetPartitionsResponse.ReplicaStatus + (*raftpb.Message)(nil), // 22: raftpb.Message } var file_cluster_proto_depIdxs = []int32{ - 15, // 0: gitaly.RaftEntry.data:type_name -> gitaly.RaftEntry.LogData - 2, // 1: gitaly.ReplicaID.partition_key:type_name -> gitaly.RaftPartitionKey - 16, // 2: gitaly.ReplicaID.metadata:type_name -> gitaly.ReplicaID.Metadata + 17, // 0: gitaly.RaftEntry.data:type_name -> gitaly.RaftEntry.LogData + 3, // 1: gitaly.ReplicaID.partition_key:type_name -> gitaly.RaftPartitionKey + 18, // 2: gitaly.ReplicaID.metadata:type_name -> gitaly.ReplicaID.Metadata 0, // 3: gitaly.ReplicaID.type:type_name -> gitaly.ReplicaID.ReplicaType - 3, // 4: gitaly.RaftMessageRequest.replica_id:type_name -> gitaly.ReplicaID - 20, // 5: gitaly.RaftMessageRequest.message:type_name -> raftpb.Message - 4, // 6: gitaly.RaftSnapshotMessageRequest.raft_msg:type_name -> gitaly.RaftMessageRequest - 2, // 7: gitaly.JoinClusterRequest.partition_key:type_name -> gitaly.RaftPartitionKey - 3, // 8: gitaly.JoinClusterRequest.replicas:type_name -> gitaly.ReplicaID - 2, // 9: gitaly.GetPartitionsRequest.partition_key:type_name -> gitaly.RaftPartitionKey - 18, // 10: gitaly.ClusterStatistics.storage_stats:type_name -> gitaly.ClusterStatistics.StorageStatsEntry - 2, // 11: gitaly.GetPartitionsResponse.partition_key:type_name -> gitaly.RaftPartitionKey - 19, // 12: gitaly.GetPartitionsResponse.replicas:type_name -> gitaly.GetPartitionsResponse.ReplicaStatus - 11, // 13: gitaly.RaftClusterInfoResponse.statistics:type_name -> gitaly.ClusterStatistics - 17, // 14: gitaly.ClusterStatistics.StorageStatsEntry.value:type_name -> gitaly.ClusterStatistics.StorageStats - 3, // 15: gitaly.GetPartitionsResponse.ReplicaStatus.replica_id:type_name -> gitaly.ReplicaID - 4, // 16: gitaly.RaftService.SendMessage:input_type -> gitaly.RaftMessageRequest - 6, // 17: gitaly.RaftService.SendSnapshot:input_type -> gitaly.RaftSnapshotMessageRequest - 8, // 18: 
gitaly.RaftService.JoinCluster:input_type -> gitaly.JoinClusterRequest - 10, // 19: gitaly.RaftService.GetPartitions:input_type -> gitaly.GetPartitionsRequest - 13, // 20: gitaly.RaftService.GetClusterInfo:input_type -> gitaly.RaftClusterInfoRequest - 5, // 21: gitaly.RaftService.SendMessage:output_type -> gitaly.RaftMessageResponse - 7, // 22: gitaly.RaftService.SendSnapshot:output_type -> gitaly.RaftSnapshotMessageResponse - 9, // 23: gitaly.RaftService.JoinCluster:output_type -> gitaly.JoinClusterResponse - 12, // 24: gitaly.RaftService.GetPartitions:output_type -> gitaly.GetPartitionsResponse - 14, // 25: gitaly.RaftService.GetClusterInfo:output_type -> gitaly.RaftClusterInfoResponse - 21, // [21:26] is the sub-list for method output_type - 16, // [16:21] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 4, // 4: gitaly.RaftMessageRequest.replica_id:type_name -> gitaly.ReplicaID + 22, // 5: gitaly.RaftMessageRequest.message:type_name -> raftpb.Message + 5, // 6: gitaly.RaftSnapshotMessageRequest.raft_msg:type_name -> gitaly.RaftMessageRequest + 3, // 7: gitaly.JoinClusterRequest.partition_key:type_name -> gitaly.RaftPartitionKey + 4, // 8: gitaly.JoinClusterRequest.replicas:type_name -> gitaly.ReplicaID + 3, // 9: gitaly.GetPartitionsRequest.partition_key:type_name -> gitaly.RaftPartitionKey + 20, // 10: gitaly.ClusterStatistics.storage_stats:type_name -> gitaly.ClusterStatistics.StorageStatsEntry + 3, // 11: gitaly.GetPartitionsResponse.partition_key:type_name -> gitaly.RaftPartitionKey + 21, // 12: gitaly.GetPartitionsResponse.replicas:type_name -> gitaly.GetPartitionsResponse.ReplicaStatus + 12, // 13: gitaly.RaftClusterInfoResponse.statistics:type_name -> gitaly.ClusterStatistics + 3, // 14: gitaly.GossipRoutingTableUpdate.partition_key:type_name -> gitaly.RaftPartitionKey + 4, // 15: 
gitaly.GossipRoutingTableUpdate.replicas:type_name -> gitaly.ReplicaID + 1, // 16: gitaly.GossipRoutingTableUpdate.update_type:type_name -> gitaly.GossipRoutingTableUpdate.UpdateType + 19, // 17: gitaly.ClusterStatistics.StorageStatsEntry.value:type_name -> gitaly.ClusterStatistics.StorageStats + 4, // 18: gitaly.GetPartitionsResponse.ReplicaStatus.replica_id:type_name -> gitaly.ReplicaID + 5, // 19: gitaly.RaftService.SendMessage:input_type -> gitaly.RaftMessageRequest + 7, // 20: gitaly.RaftService.SendSnapshot:input_type -> gitaly.RaftSnapshotMessageRequest + 9, // 21: gitaly.RaftService.JoinCluster:input_type -> gitaly.JoinClusterRequest + 11, // 22: gitaly.RaftService.GetPartitions:input_type -> gitaly.GetPartitionsRequest + 14, // 23: gitaly.RaftService.GetClusterInfo:input_type -> gitaly.RaftClusterInfoRequest + 6, // 24: gitaly.RaftService.SendMessage:output_type -> gitaly.RaftMessageResponse + 8, // 25: gitaly.RaftService.SendSnapshot:output_type -> gitaly.RaftSnapshotMessageResponse + 10, // 26: gitaly.RaftService.JoinCluster:output_type -> gitaly.JoinClusterResponse + 13, // 27: gitaly.RaftService.GetPartitions:output_type -> gitaly.GetPartitionsResponse + 15, // 28: gitaly.RaftService.GetClusterInfo:output_type -> gitaly.RaftClusterInfoResponse + 24, // [24:29] is the sub-list for method output_type + 19, // [19:24] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name } func init() { file_cluster_proto_init() } @@ -1617,8 +1825,8 @@ func file_cluster_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_cluster_proto_rawDesc), len(file_cluster_proto_rawDesc)), - NumEnums: 1, - NumMessages: 19, + NumEnums: 2, + NumMessages: 20, NumExtensions: 0, NumServices: 1, }, -- GitLab From 05460ca4912449b84d3c05a118a0649ab479e9c0 Mon 
Sep 17 00:00:00 2001 From: Quang-Minh Nguyen Date: Tue, 21 Oct 2025 16:00:23 +0700 Subject: [PATCH 4/5] gossip: Fix memberlist test failures in CI environments The memberlist tests were failing in CI with two issues: 1. Advertise address parsing failure: CI runner hostnames like "runner-ourj7cimu-project-2009901-concurrent-0" were being passed directly to memberlist, which expects IP addresses. The detectAdvertiseAddr() function now resolves the hostname to an IP address using net.LookupHost() and returns the first resolved address instead of the hostname string. 2. Goroutine leaks: Tests using defer for cleanup caused timing issues with parallel tests. Memberlist's background UDP/TCP listener goroutines weren't fully terminated before test completion. Replaced all defer cleanup with t.Cleanup() which provides better cleanup ordering in Go tests, especially with t.Parallel(). All gossip package tests now pass without goroutine leaks. --- internal/gitaly/storage/gossip/config.go | 10 +++--- .../gitaly/storage/gossip/memberlist_test.go | 36 +++++++++---------- 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/internal/gitaly/storage/gossip/config.go b/internal/gitaly/storage/gossip/config.go index 4b0f56b7b15..cc19e54bc47 100644 --- a/internal/gitaly/storage/gossip/config.go +++ b/internal/gitaly/storage/gossip/config.go @@ -52,7 +52,7 @@ func DefaultConfig(userConfig *Config) *Config { // detectAdvertiseAddr attempts to auto-detect a suitable advertise address. // It tries the following strategies in order: // 1. Use the provided listenAddr if it's a valid non-0.0.0.0 address -// 2. Use the system hostname +// 2. Use the system hostname's IP address // 3. 
Fall back to 127.0.0.1 func detectAdvertiseAddr(listenAddr string) string { // If listenAddr is provided and not 0.0.0.0, use it @@ -65,12 +65,14 @@ func detectAdvertiseAddr(listenAddr string) string { return listenAddr } - // Try to get the hostname + // Try to get the hostname and resolve it to an IP address hostname, err := os.Hostname() if err == nil && hostname != "" { - // Verify the hostname is resolvable + // Verify the hostname is resolvable and return the first IP address if addrs, err := net.LookupHost(hostname); err == nil && len(addrs) > 0 { - return hostname + // Return the first resolved IP address instead of the hostname + // This ensures memberlist receives a valid IP address + return addrs[0] } } diff --git a/internal/gitaly/storage/gossip/memberlist_test.go b/internal/gitaly/storage/gossip/memberlist_test.go index 89cdef14fcf..d68ecd46171 100644 --- a/internal/gitaly/storage/gossip/memberlist_test.go +++ b/internal/gitaly/storage/gossip/memberlist_test.go @@ -106,9 +106,9 @@ func TestMemberlistManager_StartAndShutdown(t *testing.T) { // Start once err = manager.Start(ctx) require.NoError(t, err) - defer func() { + t.Cleanup(func() { _ = manager.Shutdown(ctx) - }() + }) // Try to start again err = manager.Start(ctx) @@ -193,9 +193,9 @@ func TestMemberlistManager_JoinCluster(t *testing.T) { // Should start successfully even without seeds err = manager.Start(ctx) require.NoError(t, err) - defer func() { + t.Cleanup(func() { _ = manager.Shutdown(ctx) - }() + }) assert.True(t, manager.IsAlive()) assert.Equal(t, 1, manager.NumMembers(), "should only have local node") @@ -214,9 +214,9 @@ func TestMemberlistManager_JoinCluster(t *testing.T) { // Should start successfully but log warning about failed join err = manager.Start(ctx) require.NoError(t, err) - defer func() { + t.Cleanup(func() { _ = manager.Shutdown(ctx) - }() + }) assert.True(t, manager.IsAlive()) // Should still work standalone @@ -238,9 +238,9 @@ func TestMemberlistManager_JoinCluster(t 
*testing.T) { err = manager1.Start(ctx) require.NoError(t, err) - defer func() { + t.Cleanup(func() { _ = manager1.Shutdown(ctx) - }() + }) // Start second node with first node as seed cfg2 := DefaultConfig(&Config{ @@ -253,9 +253,9 @@ func TestMemberlistManager_JoinCluster(t *testing.T) { err = manager2.Start(ctx) require.NoError(t, err) - defer func() { + t.Cleanup(func() { _ = manager2.Shutdown(ctx) - }() + }) // Wait for gossip to propagate require.Eventually(t, func() bool { @@ -298,9 +298,9 @@ func TestMemberlistManager_WithMetrics(t *testing.T) { err = manager.Start(ctx) require.NoError(t, err) - defer func() { + t.Cleanup(func() { _ = manager.Shutdown(ctx) - }() + }) // Wait for metrics to be initialized require.Eventually(t, func() bool { @@ -345,9 +345,9 @@ func TestMemberlistManager_EventCallbacks(t *testing.T) { err = manager1.Start(ctx) require.NoError(t, err) - defer func() { + t.Cleanup(func() { _ = manager1.Shutdown(ctx) - }() + }) // Start second node cfg2 := DefaultConfig(&Config{ @@ -360,9 +360,9 @@ func TestMemberlistManager_EventCallbacks(t *testing.T) { err = manager2.Start(ctx) require.NoError(t, err) - defer func() { + t.Cleanup(func() { _ = manager2.Shutdown(ctx) - }() + }) // Wait for join callback select { @@ -397,9 +397,9 @@ func TestMemberlistManager_GetMethods(t *testing.T) { // After start err = manager.Start(ctx) require.NoError(t, err) - defer func() { + t.Cleanup(func() { _ = manager.Shutdown(ctx) - }() + }) assert.NotNil(t, manager.GetLocalNode()) assert.NotNil(t, manager.GetMembers()) -- GitLab From 8d8be48814a276e288058ea3194a7dba21db65a3 Mon Sep 17 00:00:00 2001 From: Quang-Minh Nguyen Date: Wed, 22 Oct 2025 12:06:14 +0700 Subject: [PATCH 5/5] gossip: Improve configuration logic and context management The configuration logic in DefaultConfig() had redundant AdvertisePort handling split across multiple locations, making it harder to follow the defaulting behavior. 
This commit consolidates the logic by setting the default once after initialization, then applying user overrides in a single conditional block. The periodicHealthUpdate goroutine was using context.Background() instead of deriving from the parent context. This prevented proper cancellation when the parent context was cancelled. The health update context now derives from the parent, ensuring the goroutine stops when either the parent is cancelled or Shutdown() is called. Additional improvements include clarifying that detectAdvertiseAddr returns IP addresses for memberlist compatibility and updating function references to use stable names instead of line numbers. --- internal/gitaly/storage/gossip/config.go | 12 +++++++----- internal/gitaly/storage/gossip/memberlist.go | 6 ++++-- internal/gitaly/storage/gossip/messages.go | 2 +- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/internal/gitaly/storage/gossip/config.go b/internal/gitaly/storage/gossip/config.go index cc19e54bc47..b286546f126 100644 --- a/internal/gitaly/storage/gossip/config.go +++ b/internal/gitaly/storage/gossip/config.go @@ -21,6 +21,7 @@ func DefaultConfig(userConfig *Config) *Config { // Auto-detect advertise address if not provided cfg.AdvertiseAddr = detectAdvertiseAddr("") + // Default AdvertisePort to BindPort cfg.AdvertisePort = cfg.BindPort // Apply user overrides if provided @@ -30,16 +31,15 @@ func DefaultConfig(userConfig *Config) *Config { } if userConfig.BindPort != 0 { cfg.BindPort = userConfig.BindPort - // Update AdvertisePort default to match custom BindPort - if userConfig.AdvertisePort == 0 { - cfg.AdvertisePort = userConfig.BindPort - } } if userConfig.AdvertiseAddr != "" { cfg.AdvertiseAddr = userConfig.AdvertiseAddr } if userConfig.AdvertisePort != 0 { cfg.AdvertisePort = userConfig.AdvertisePort + } else { + // Update AdvertisePort to match custom BindPort when not explicitly set + cfg.AdvertisePort = cfg.BindPort } if len(userConfig.Seeds) > 0 { cfg.Seeds = 
userConfig.Seeds @@ -52,8 +52,10 @@ func DefaultConfig(userConfig *Config) *Config { // detectAdvertiseAddr attempts to auto-detect a suitable advertise address. // It tries the following strategies in order: // 1. Use the provided listenAddr if it's a valid non-0.0.0.0 address -// 2. Use the system hostname's IP address +// 2. Resolve the system hostname to its first IP address // 3. Fall back to 127.0.0.1 +// +// Note: Returns IP addresses rather than hostnames for memberlist compatibility. func detectAdvertiseAddr(listenAddr string) string { // If listenAddr is provided and not 0.0.0.0, use it if listenAddr != "" && listenAddr != "0.0.0.0" && listenAddr != "[::]" { diff --git a/internal/gitaly/storage/gossip/memberlist.go b/internal/gitaly/storage/gossip/memberlist.go index 791fd531f6f..05863c3b8da 100644 --- a/internal/gitaly/storage/gossip/memberlist.go +++ b/internal/gitaly/storage/gossip/memberlist.go @@ -130,7 +130,7 @@ func (m *MemberlistManager) Start(ctx context.Context) error { } // Start periodic health updates with a cancellable context - healthCtx, cancel := context.WithCancel(context.Background()) + healthCtx, cancel := context.WithCancel(ctx) m.cancel = cancel go m.periodicHealthUpdate(healthCtx) @@ -357,6 +357,8 @@ func newMemberlistLogger(logger log.Logger) *stdlog.Logger { if logrusLogger, ok := logger.(log.LogrusLogger); ok { return stdlog.New(logrusLogger.Output(), "[memberlist] ", 0) } - // Fallback to stderr if we can't get the output writer + // Fallback to stderr for defensive programming in case of unexpected logger types. + // This should not be reached in practice since all Gitaly loggers are LogrusLogger instances, + // but provides a safe default rather than panicking. 
return stdlog.New(stdlog.Writer(), "[memberlist] ", 0) } diff --git a/internal/gitaly/storage/gossip/messages.go b/internal/gitaly/storage/gossip/messages.go index ba99c869039..06d5ba0d4fb 100644 --- a/internal/gitaly/storage/gossip/messages.go +++ b/internal/gitaly/storage/gossip/messages.go @@ -170,7 +170,7 @@ func NewRoutingUpdate( } // IsNewer returns true if this update is newer than the other update based on term/index comparison. -// This implements the same conflict resolution logic as routing_table.go:90-95. +// This implements the same conflict resolution logic as RoutingTable.SetEntry. func IsNewer(update, other *gitalypb.GossipRoutingTableUpdate) bool { if update.GetTerm() > other.GetTerm() { return true -- GitLab