diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index dc1d2214..ebae81f1 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -14,7 +14,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v2 with: - go-version: "1.19" + go-version: "1.20" - uses: actions/cache@v2 with: diff --git a/.github/workflows/test-e2e.yaml b/.github/workflows/test-e2e.yaml index f02c6117..e1b4ddeb 100644 --- a/.github/workflows/test-e2e.yaml +++ b/.github/workflows/test-e2e.yaml @@ -14,7 +14,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v2 with: - go-version: "1.19" + go-version: "1.20" - uses: actions/cache@v2 with: diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index c974ef5f..1572c715 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -13,7 +13,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v2 with: - go-version: "1.19" + go-version: "1.20" - uses: actions/cache@v2 with: diff --git a/Dockerfile b/Dockerfile index f5c3fbd6..4df212ca 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.19 as builder +FROM golang:1.20 as builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/Makefile b/Makefile index b1a458f7..b1dcd2ae 100644 --- a/Makefile +++ b/Makefile @@ -110,10 +110,10 @@ TEST_PKG = ./api/... ./controllers/... ./pkg/... KUBEBUILDER_ASSETS = "$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" test: manifests generate fmt vet envtest assets ginkgo ## Run tests. - KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) $(GINKGO) -p -r $(TEST_PKG) -coverprofile cover.out + KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) $(GINKGO) -p -v -r $(TEST_PKG) -coverprofile cover.out test-sequential: manifests generate fmt vet envtest assets ginkgo ## Run tests. - KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) $(GINKGO) -r $(TEST_PKG) -coverprofile cover.out + KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) $(GINKGO) -v -r $(TEST_PKG) -coverprofile cover.out test-e2e: export KUBECONFIG = $(PWD)/kubeconfig test-e2e: manifests ginkgo kind-create kind-deploy ## Runs e2e tests diff --git a/api/v1alpha1/redisshard_types.go b/api/v1alpha1/redisshard_types.go index 36d7a626..30ef3b9d 100644 --- a/api/v1alpha1/redisshard_types.go +++ b/api/v1alpha1/redisshard_types.go @@ -17,6 +17,10 @@ limitations under the License. 
package v1alpha1

import (
+	"strconv"
+	"strings"
+
+	"github.com/3scale/saas-operator/pkg/util"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"
)
@@ -61,11 +65,54 @@ type RedisShardNodes struct {
	// Master is the node that acts as master role in the redis shard
	// +operator-sdk:csv:customresourcedefinitions:type=status
	// +optional
-	Master *string `json:"master,omitempty"`
+	Master map[string]string `json:"master,omitempty"`
	// Slaves are the nodes that act as master role in the redis shard
	// +operator-sdk:csv:customresourcedefinitions:type=status
	// +optional
-	Slaves []string `json:"slaves,omitempty"`
+	Slaves map[string]string `json:"slaves,omitempty"`
+}
+
+// MasterHostPort returns the "host:port" of the shard's master node
+func (rsn *RedisShardNodes) MasterHostPort() string {
+	for _, hostport := range rsn.Master {
+		return hostport
+	}
+	return ""
+}
+
+// GetNodeByPodIndex returns the alias and the "host:port" of the node
+// whose pod index matches the given one, or empty strings if there is no match
+func (rsn *RedisShardNodes) GetNodeByPodIndex(podIndex int) (string, string) {
+	nodes := util.MergeMaps(map[string]string{}, rsn.Master, rsn.Slaves)
+
+	for alias, hostport := range nodes {
+		// the pod index is the numeric suffix of the alias (e.g. "redis-shard-rs0-2")
+		i := alias[strings.LastIndex(alias, "-")+1:]
+		index, _ := strconv.Atoi(i)
+		if index == podIndex {
+			return alias, hostport
+		}
+	}
+
+	return "", ""
+}
+
+// GetHostPortByPodIndex returns the "host:port" of the node with the given pod index
+func (rsn *RedisShardNodes) GetHostPortByPodIndex(podIndex int) string {
+	_, hostport := rsn.GetNodeByPodIndex(podIndex)
+	return hostport
+}
+
+// GetAliasByPodIndex returns the alias of the node with the given pod index
+func (rsn *RedisShardNodes) GetAliasByPodIndex(podIndex int) string {
+	alias, _ := rsn.GetNodeByPodIndex(podIndex)
+	return alias
+}
+
+// GetIndexByHostPort returns the pod index of the node with the given
+// "host:port", or -1 if there is no match
+func (rsn *RedisShardNodes) GetIndexByHostPort(hostport string) int {
+	nodes := util.MergeMaps(map[string]string{}, rsn.Master, rsn.Slaves)
+	for alias, hp := range nodes {
+		if hostport == hp {
+			i := alias[strings.LastIndex(alias, "-")+1:]
+			index, _ := strconv.Atoi(i)
+			return index
+		}
+	}
+	return -1
}

// RedisShardStatus defines the observed state of RedisShard
diff --git a/api/v1alpha1/redisshard_types_test.go b/api/v1alpha1/redisshard_types_test.go
new file mode 100644
index 00000000..f074140b
--- /dev/null
+++ b/api/v1alpha1/redisshard_types_test.go
@@ -0,0 +1,232 @@
+/*
+Copyright 2021.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"testing"
+)
+
+func TestRedisShardNodes_GetNodeByPodIndex(t *testing.T) {
+	type fields struct {
+		Master map[string]string
+		Slaves map[string]string
+	}
+	type args struct {
+		podIndex int
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   string
+		want1  string
+	}{
+		{
+			name: "Returns the node that has the given pod index",
+			fields: fields{
+				Master: map[string]string{
+					"redis-shard-rs0-0": "127.0.0.1:1000",
+				},
+				Slaves: map[string]string{
+					"redis-shard-rs0-1": "127.0.0.1:2000",
+					"redis-shard-rs0-2": "127.0.0.1:3000",
+				},
+			},
+			args: args{
+				podIndex: 2,
+			},
+			want:  "redis-shard-rs0-2",
+			want1: "127.0.0.1:3000",
+		},
+		{
+			name: "Not found",
+			fields: fields{
+				Master: map[string]string{
+					"redis-shard-rs0-0": "127.0.0.1:1000",
+				},
+				Slaves: map[string]string{
+					"redis-shard-rs0-1": "127.0.0.1:2000",
+					"redis-shard-rs0-2": "127.0.0.1:3000",
+				},
+			},
+			args: args{
+				podIndex: 3,
+			},
+			want:  "",
+			want1: "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			rsn := &RedisShardNodes{
+				Master: tt.fields.Master,
+				Slaves: tt.fields.Slaves,
+			}
+			got, got1 := rsn.GetNodeByPodIndex(tt.args.podIndex)
+			if got != tt.want {
+				t.Errorf("RedisShardNodes.GetNodeByPodIndex() got = %v, want %v", got, tt.want)
+			}
+			if got1 != tt.want1 {
+				t.Errorf("RedisShardNodes.GetNodeByPodIndex() got1 = %v, want %v", got1, tt.want1)
+			}
+		})
+	}
+}
+
+func TestRedisShardNodes_MasterHostPort(t *testing.T) {
+	type fields struct {
+		Master map[string]string
+		Slaves map[string]string
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		want   string
+	}{
+		{
+			name: "Returns master hostport",
+			fields: fields{
+				Master: map[string]string{"rs0-0": "127.0.0.1:1000"},
+			},
+			want: "127.0.0.1:1000",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			rsn := &RedisShardNodes{
+				Master: tt.fields.Master,
+				Slaves: tt.fields.Slaves,
+			}
+			if got := rsn.MasterHostPort(); got != tt.want {
+				t.Errorf("RedisShardNodes.MasterHostPort() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestRedisShardNodes_GetHostPortByPodIndex(t *testing.T) {
+	type fields struct {
+		Master map[string]string
+		Slaves map[string]string
+	}
+	type args struct {
+		podIndex int
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   string
+	}{
+		{
+			name: "Returns the hostport of the node with the given pod index",
+			fields: fields{
+				Master: map[string]string{"rs0-0": "127.0.0.1:1000"},
+				Slaves: map[string]string{"rs0-1": "127.0.0.1:2000", "rs0-2": "127.0.0.1:3000"},
+			},
+			args: args{podIndex: 2},
+			want: "127.0.0.1:3000",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			rsn := &RedisShardNodes{
+				Master: tt.fields.Master,
+				Slaves: tt.fields.Slaves,
+			}
+			if got := rsn.GetHostPortByPodIndex(tt.args.podIndex); got != tt.want {
+				t.Errorf("RedisShardNodes.GetHostPortByPodIndex() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestRedisShardNodes_GetAliasByPodIndex(t *testing.T) {
+	type fields struct {
+		Master map[string]string
+		Slaves map[string]string
+	}
+	type args struct {
+		podIndex int
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   string
+	}{
+		{
+			name: "Returns the alias of the node with the given pod index",
+			fields: fields{
+				Master: map[string]string{"rs0-0": "127.0.0.1:1000"},
+				Slaves: map[string]string{"rs0-1": "127.0.0.1:2000", "rs0-2": "127.0.0.1:3000"},
+			},
+			args: args{podIndex: 1},
+			want:
"rs0-1", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rsn := &RedisShardNodes{ + Master: tt.fields.Master, + Slaves: tt.fields.Slaves, + } + if got := rsn.GetAliasByPodIndex(tt.args.podIndex); got != tt.want { + t.Errorf("RedisShardNodes.GetAliasByPodIndex() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestRedisShardNodes_GetIndexByHostPort(t *testing.T) { + type fields struct { + Master map[string]string + Slaves map[string]string + } + type args struct { + hostport string + } + tests := []struct { + name string + fields fields + args args + want int + }{ + { + name: "Returns the index of the provided hostport", + fields: fields{ + Master: map[string]string{"rs0-0": "127.0.0.1:1000"}, + Slaves: map[string]string{"rs0-1": "127.0.0.1:2000", "rs0-2": "127.0.0.1:3000"}, + }, + args: args{ + hostport: "127.0.0.1:3000", + }, + want: 2, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rsn := &RedisShardNodes{ + Master: tt.fields.Master, + Slaves: tt.fields.Slaves, + } + if got := rsn.GetIndexByHostPort(tt.args.hostport); got != tt.want { + t.Errorf("RedisShardNodes.GetIndexByHostPort() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/api/v1alpha1/sentinel_types.go b/api/v1alpha1/sentinel_types.go index 32bcaebb..f383c533 100644 --- a/api/v1alpha1/sentinel_types.go +++ b/api/v1alpha1/sentinel_types.go @@ -17,10 +17,9 @@ limitations under the License. package v1alpha1 import ( - "fmt" "time" - "github.com/3scale/saas-operator/pkg/redis/crud/client" + "github.com/3scale/saas-operator/pkg/redis/client" "github.com/3scale/saas-operator/pkg/util" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -77,7 +76,13 @@ type SentinelConfig struct { // Monitored shards indicates the redis servers that form // part of each shard monitored by sentinel // +operator-sdk:csv:customresourcedefinitions:type=spec + // +optional MonitoredShards map[string][]string `json:"monitoredShards,"` + // ClusterTopology indicates the redis servers that form + // part of each shard monitored by sentinel + // +operator-sdk:csv:customresourcedefinitions:type=spec + // +optional + ClusterTopology map[string]map[string]string `json:"clusterTopology,"` // StorageClass is the storage class to be used for // the persistent sentinel config file where the shards // state is stored @@ -188,24 +193,6 @@ type MonitoredShard struct { // Name is the name of the redis shard // +operator-sdk:csv:customresourcedefinitions:type=status Name string `json:"name"` - // Master is the address of the master redis server of - // this shard, in the format "127.0.0.1:6379" - // DEPRECATED - this field will be removed in an upcoming release - // +operator-sdk:csv:customresourcedefinitions:type=status - // +optional - Master string `json:"master,omitempty"` - // SlavesRO is the list of addresses of the read-only slave - // servers in this shard, in the format "127.0.0.1:6379" - // DEPRECATED - this field will be removed in an upcoming release - // +operator-sdk:csv:customresourcedefinitions:type=status - // +optional - SlavesRO []string `json:"slavesRO,omitempty"` - // SlavesRW is the list of addresses of the read-write slave - // servers in this shard, in the format "127.0.0.1:6379" - // DEPRECATED - this field will be removed in an upcoming release - // +operator-sdk:csv:customresourcedefinitions:type=status - // +optional - SlavesRW []string `json:"slavesRW,omitempty"` // Server is a map intended to store configuration information // of each of the 
RedisServer instances that belong to the MonitoredShard
	// +operator-sdk:csv:customresourcedefinitions:type=status
@@ -213,45 +200,14 @@
	Servers map[string]RedisServerDetails `json:"servers,omitempty"`
}

-func (ms MonitoredShard) GetMaster() (string, RedisServerDetails, error) {
-	for address, srv := range ms.Servers {
-		if srv.Role == client.Master {
-			// there is only one master, so we return
-			return address, srv, nil
-		}
-	}
-	return "", RedisServerDetails{}, fmt.Errorf("unable to find master")
-}
-
-func (ms MonitoredShard) GetSlavesRW() map[string]RedisServerDetails {
-	servers := map[string]RedisServerDetails{}
-	for address, srv := range ms.Servers {
-		if srv.Role == client.Slave {
-			if val, ok := srv.Config["slave-read-only"]; ok && val == "no" {
-				servers[address] = srv
-			}
-		}
-	}
-	return servers
-}
-
-func (ms MonitoredShard) GetSlavesRO() map[string]RedisServerDetails {
-	servers := map[string]RedisServerDetails{}
-	for address, srv := range ms.Servers {
-		if srv.Role == client.Slave {
-			if val, ok := srv.Config["slave-read-only"]; ok && val == "yes" {
-				servers[address] = srv
-			}
-		}
-	}
-	return servers
-}
-
type RedisServerDetails struct {
	// +operator-sdk:csv:customresourcedefinitions:type=status
	Role client.Role `json:"role"`
	// +operator-sdk:csv:customresourcedefinitions:type=status
	// +optional
+	Address string `json:"address,omitempty"`
+	// +operator-sdk:csv:customresourcedefinitions:type=status
+	// +optional
	Config map[string]string `json:"config,omitempty"`
}
diff --git a/api/v1alpha1/twemproxyconfig_types.go b/api/v1alpha1/twemproxyconfig_types.go
index 960fc9e6..7fe6cfa6 100644
--- a/api/v1alpha1/twemproxyconfig_types.go
+++ b/api/v1alpha1/twemproxyconfig_types.go
@@ -43,6 +43,7 @@ type TwemproxyConfigSpec struct {
	// +optional
	SentinelURIs []string `json:"sentinelURIs,omitempty"`
	// ServerPools is the list of Twemproxy server pools
+	// WARNING: only 1 pool is supported at this time
	// +operator-sdk:csv:customresourcedefinitions:type=spec
	ServerPools []TwemproxyServerPool `json:"serverPools"`
	// ReconcileServerPools is a flag that allows to deactivate
@@ -129,11 +130,22 @@ type ShardedRedisTopology struct {
}

// TwemproxyConfigStatus defines the observed state of TwemproxyConfig
-type TwemproxyConfigStatus struct{}
+type TwemproxyConfigStatus struct {
+	// The list of servers currently targeted by this TwemproxyConfig
+	// +operator-sdk:csv:customresourcedefinitions:type=status
+	// +optional
+	SelectedTargets map[string]TargetServer `json:"targets,omitempty"`
+}

-//+kubebuilder:object:root=true
-//+kubebuilder:subresource:status
+// Defines a server targeted by one of the TwemproxyConfig server pools
+type TargetServer struct {
+	ServerAlias   *string `json:"serverAlias,omitempty"`
+	ServerAddress string  `json:"serverAddress"`
+}

+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=`.status.targets`,name=Selected Targets,type=string
// TwemproxyConfig is the Schema for the twemproxyconfigs API
type TwemproxyConfig struct {
	metav1.TypeMeta `json:",inline"`
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 65155372..d7fb16e0 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -1830,16 +1830,6 @@ func (in *Marin3rSidecarSpec) DeepCopy() *Marin3rSidecarSpec {

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MonitoredShard) DeepCopyInto(out *MonitoredShard) { *out = *in - if in.SlavesRO != nil { - in, out := &in.SlavesRO, &out.SlavesRO - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SlavesRW != nil { - in, out := &in.SlavesRW, &out.SlavesRW - *out = make([]string, len(*in)) - copy(*out, *in) - } if in.Servers != nil { in, out := &in.Servers, &out.Servers *out = make(map[string]RedisServerDetails, len(*in)) @@ -2177,13 +2167,17 @@ func (in *RedisShardNodes) DeepCopyInto(out *RedisShardNodes) { *out = *in if in.Master != nil { in, out := &in.Master, &out.Master - *out = new(string) - **out = **in + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } if in.Slaves != nil { in, out := &in.Slaves, &out.Slaves - *out = make([]string, len(*in)) - copy(*out, *in) + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } } @@ -2513,6 +2507,23 @@ func (in *SentinelConfig) DeepCopyInto(out *SentinelConfig) { (*out)[key] = outVal } } + if in.ClusterTopology != nil { + in, out := &in.ClusterTopology, &out.ClusterTopology + *out = make(map[string]map[string]string, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } if in.StorageClass != nil { in, out := &in.StorageClass, &out.StorageClass *out = new(string) @@ -3269,13 +3280,33 @@ func (in *SystemZyncSpec) DeepCopy() *SystemZyncSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetServer) DeepCopyInto(out *TargetServer) { + *out = *in + if in.ServerAlias != nil { + in, out := &in.ServerAlias, &out.ServerAlias + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetServer. +func (in *TargetServer) DeepCopy() *TargetServer { + if in == nil { + return nil + } + out := new(TargetServer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TwemproxyConfig) DeepCopyInto(out *TwemproxyConfig) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TwemproxyConfig. @@ -3368,6 +3399,13 @@ func (in *TwemproxyConfigSpec) DeepCopy() *TwemproxyConfigSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TwemproxyConfigStatus) DeepCopyInto(out *TwemproxyConfigStatus) { *out = *in + if in.SelectedTargets != nil { + in, out := &in.SelectedTargets, &out.SelectedTargets + *out = make(map[string]TargetServer, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TwemproxyConfigStatus. 
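The API changes above replace the plain master string and slave list of `RedisShardNodes` with alias→"host:port" maps, which is what enables the alias/pod-index lookups. A minimal usage sketch (not part of the changeset; the aliases and addresses are hypothetical, following the `<name>-<pod index>` convention exercised by the new tests):

```go
package main

import (
	"fmt"

	saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1"
)

func main() {
	// alias keys carry the pod index as their numeric suffix
	nodes := &saasv1alpha1.RedisShardNodes{
		Master: map[string]string{"redis-shard-rs0-0": "127.0.0.1:1000"},
		Slaves: map[string]string{
			"redis-shard-rs0-1": "127.0.0.1:2000",
			"redis-shard-rs0-2": "127.0.0.1:3000",
		},
	}

	fmt.Println(nodes.MasterHostPort())                     // "127.0.0.1:1000"
	fmt.Println(nodes.GetAliasByPodIndex(2))                // "redis-shard-rs0-2"
	fmt.Println(nodes.GetHostPortByPodIndex(1))             // "127.0.0.1:2000"
	fmt.Println(nodes.GetIndexByHostPort("127.0.0.1:3000")) // 2
}
```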
diff --git a/config/crd/bases/saas.3scale.net_redisshards.yaml b/config/crd/bases/saas.3scale.net_redisshards.yaml index d2ca507f..d2bfe47f 100644 --- a/config/crd/bases/saas.3scale.net_redisshards.yaml +++ b/config/crd/bases/saas.3scale.net_redisshards.yaml @@ -77,15 +77,17 @@ spec: description: ShardNodes describes the nodes in the redis shard properties: master: + additionalProperties: + type: string description: Master is the node that acts as master role in the redis shard - type: string + type: object slaves: + additionalProperties: + type: string description: Slaves are the nodes that act as master role in the redis shard - items: - type: string - type: array + type: object type: object type: object type: object diff --git a/config/crd/bases/saas.3scale.net_sentinels.yaml b/config/crd/bases/saas.3scale.net_sentinels.yaml index 2c8f59bb..271fa271 100644 --- a/config/crd/bases/saas.3scale.net_sentinels.yaml +++ b/config/crd/bases/saas.3scale.net_sentinels.yaml @@ -45,6 +45,14 @@ spec: config: description: Config configures the sentinel process properties: + clusterTopology: + additionalProperties: + additionalProperties: + type: string + type: object + description: ClusterTopology indicates the redis servers that + form part of each shard monitored by sentinel + type: object metricsRefreshInterval: description: MetricsRefreshInterval determines the refresh interval for gahtering metrics from sentinel @@ -72,8 +80,6 @@ spec: stored pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - required: - - monitoredShards type: object grafanaDashboard: description: Configures the Grafana Dashboard for the component @@ -482,17 +488,14 @@ spec: description: MonitoredShard contains information of one of the shards monitored by the Sentinel resource properties: - master: - description: Master is the address of the master redis server - of this shard, in the format "127.0.0.1:6379" DEPRECATED - - this field will be removed in an upcoming release - type: string name: description: Name is the name of the redis shard type: string servers: additionalProperties: properties: + address: + type: string config: additionalProperties: type: string @@ -508,20 +511,6 @@ spec: information of each of the RedisServer instances that belong to the MonitoredShard type: object - slavesRO: - description: SlavesRO is the list of addresses of the read-only - slave servers in this shard, in the format "127.0.0.1:6379" - DEPRECATED - this field will be removed in an upcoming release - items: - type: string - type: array - slavesRW: - description: SlavesRW is the list of addresses of the read-write - slave servers in this shard, in the format "127.0.0.1:6379" - DEPRECATED - this field will be removed in an upcoming release - items: - type: string - type: array required: - name type: object diff --git a/config/crd/bases/saas.3scale.net_twemproxyconfigs.yaml b/config/crd/bases/saas.3scale.net_twemproxyconfigs.yaml index 76c7b3a4..1cb63881 100644 --- a/config/crd/bases/saas.3scale.net_twemproxyconfigs.yaml +++ b/config/crd/bases/saas.3scale.net_twemproxyconfigs.yaml @@ -15,7 +15,11 @@ spec: singular: twemproxyconfig scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .status.targets + name: Selected Targets + type: string + name: v1alpha1 schema: openAPIV3Schema: description: TwemproxyConfig is the Schema for the twemproxyconfigs API @@ -61,7 +65,8 @@ spec: type: string type: array 
          serverPools:
-           description: ServerPools is the list of Twemproxy server pools
+           description: 'ServerPools is the list of Twemproxy server pools WARNING:
+             only 1 pool is supported at this time'
            items:
              properties:
                bindAddress:
                  type: string
            type: array
@@ -126,6 +131,21 @@
          type: object
      status:
        description: TwemproxyConfigStatus defines the observed state of TwemproxyConfig
+       properties:
+         targets:
+           additionalProperties:
+             description: Defines a server targeted by one of the TwemproxyConfig
+               server pools
+             properties:
+               serverAddress:
+                 type: string
+               serverAlias:
+                 type: string
+             required:
+             - serverAddress
+             type: object
+           description: The list of servers currently targeted by this TwemproxyConfig
+           type: object
        type: object
    type: object
  served: true
diff --git a/config/test/kustomization.yaml b/config/test/kustomization.yaml
index d54f044f..9510a1d0 100644
--- a/config/test/kustomization.yaml
+++ b/config/test/kustomization.yaml
@@ -18,6 +18,12 @@ patches:
    - op: replace
      path: /spec/template/spec/containers/0/env/0
      value: { "name": "WATCH_NAMESPACE", "value": "" }
+   - op: add
+     path: /spec/template/spec/containers/0/env/1
+     value: { "name": "LOG_LEVEL", "value": "debug" }
+   - op: add
+     path: /spec/template/spec/containers/0/env/1
+     value: { "name": "LOG_MODE", "value": "dev" }
  - target:
      group: rbac.authorization.k8s.io
      kind: Role
diff --git a/controllers/redisshard_controller.go b/controllers/redisshard_controller.go
index 59f21b3c..9d0b83ba 100644
--- a/controllers/redisshard_controller.go
+++ b/controllers/redisshard_controller.go
@@ -24,14 +24,14 @@ import (
	basereconciler "github.com/3scale-ops/basereconciler/reconciler"
	saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1"
	"github.com/3scale/saas-operator/pkg/generators/redisshard"
-	"github.com/3scale/saas-operator/pkg/redis"
-	"github.com/3scale/saas-operator/pkg/redis/crud/client"
+	"github.com/3scale/saas-operator/pkg/redis/client"
+	redis "github.com/3scale/saas-operator/pkg/redis/server"
+	"github.com/3scale/saas-operator/pkg/redis/sharded"
	"github.com/go-logr/logr"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/utils/pointer"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log"
)
@@ -39,7 +39,8 @@ import (
// RedisShardReconciler reconciles a RedisShard object
type RedisShardReconciler struct {
	basereconciler.Reconciler
-	Log logr.Logger
+	Log  logr.Logger
+	Pool *redis.ServerPool
}

// +kubebuilder:rbac:groups=saas.3scale.net,namespace=placeholder,resources=redisshards,verbs=get;list;watch;create;update;patch;delete
@@ -79,9 +80,6 @@ func (r *RedisShardReconciler) Reconcile(ctx context.Context, req ctrl.Request)
	shard, result, err := r.setRedisRoles(ctx, types.NamespacedName{Name: req.Name, Namespace: req.Namespace},
		*instance.Spec.MasterIndex, *instance.Spec.SlaveCount+1, gen.ServiceName(), logger)

-	// Close Redis clients
-	defer shard.Cleanup(logger)
-
	if result != nil || err != nil {
		return *result, err
	}
@@ -103,26 +101,34 @@ func (r *RedisShardReconciler) SetupWithManager(mgr ctrl.Manager) error {
		Complete(r)
}

-func (r *RedisShardReconciler) setRedisRoles(ctx context.Context, key types.NamespacedName, masterIndex, replicas int32, serviceName string, log logr.Logger) (*redis.Shard, *ctrl.Result, error) {
+func (r *RedisShardReconciler) setRedisRoles(ctx context.Context, key types.NamespacedName, masterIndex, replicas int32, serviceName string, log logr.Logger) (*sharded.Shard, *ctrl.Result, error) {

-	redisURLs :=
make([]string, replicas) + var masterHostPort string + redisURLs := make(map[string]string, replicas) for i := 0; i < int(replicas); i++ { pod := &corev1.Pod{} key := types.NamespacedName{Name: fmt.Sprintf("%s-%d", serviceName, i), Namespace: key.Namespace} err := r.Client.Get(ctx, key, pod) if err != nil { - return &redis.Shard{Name: key.Name}, &ctrl.Result{}, err + return &sharded.Shard{Name: key.Name}, &ctrl.Result{}, err + } + if pod.Status.PodIP == "" { + log.Info("waiting for pod IP to be allocated") + return &sharded.Shard{Name: key.Name}, &ctrl.Result{RequeueAfter: 5 * time.Second}, nil } - redisURLs[i] = fmt.Sprintf("redis://%s:%d", pod.Status.PodIP, 6379) + redisURLs[fmt.Sprintf("%s-%d", serviceName, i)] = fmt.Sprintf("redis://%s:%d", pod.Status.PodIP, 6379) + if int(masterIndex) == i { + masterHostPort = fmt.Sprintf("%s:%d", pod.Status.PodIP, 6379) + } } - shard, err := redis.NewShard(key.Name, redisURLs) + shard, err := sharded.NewShard(key.Name, redisURLs, r.Pool) if err != nil { return shard, &ctrl.Result{}, err } - _, err = shard.Init(ctx, masterIndex, log) + _, err = shard.Init(ctx, masterHostPort) if err != nil { log.Info("waiting for redis shard init") return shard, &ctrl.Result{Requeue: true, RequeueAfter: 10 * time.Second}, nil @@ -131,17 +137,17 @@ func (r *RedisShardReconciler) setRedisRoles(ctx context.Context, key types.Name return shard, nil, nil } -func (r *RedisShardReconciler) updateStatus(ctx context.Context, shard *redis.Shard, instance *saasv1alpha1.RedisShard, log logr.Logger) error { +func (r *RedisShardReconciler) updateStatus(ctx context.Context, shard *sharded.Shard, instance *saasv1alpha1.RedisShard, log logr.Logger) error { status := saasv1alpha1.RedisShardStatus{ - ShardNodes: &saasv1alpha1.RedisShardNodes{Master: nil, Slaves: []string{}}, + ShardNodes: &saasv1alpha1.RedisShardNodes{Master: map[string]string{}, Slaves: map[string]string{}}, } for _, server := range shard.Servers { if server.Role == client.Master { - status.ShardNodes.Master = pointer.String(server.Name) + status.ShardNodes.Master[server.GetAlias()] = server.ID() } else if server.Role == client.Slave { - status.ShardNodes.Slaves = append(status.ShardNodes.Slaves, server.Name) + status.ShardNodes.Slaves[server.GetAlias()] = server.ID() } } if !equality.Semantic.DeepEqual(status, instance.Status) { diff --git a/controllers/sentinel_controller.go b/controllers/sentinel_controller.go index 61ef6d6e..33ba7eb5 100644 --- a/controllers/sentinel_controller.go +++ b/controllers/sentinel_controller.go @@ -18,27 +18,32 @@ package controllers import ( "context" - "fmt" + "errors" "time" basereconciler "github.com/3scale-ops/basereconciler/reconciler" saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1" "github.com/3scale/saas-operator/pkg/generators/sentinel" "github.com/3scale/saas-operator/pkg/reconcilers/threads" - "github.com/3scale/saas-operator/pkg/redis" "github.com/3scale/saas-operator/pkg/redis/events" "github.com/3scale/saas-operator/pkg/redis/metrics" + redis "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/3scale/saas-operator/pkg/redis/sharded" "github.com/go-logr/logr" grafanav1alpha1 "github.com/grafana-operator/grafana-operator/v4/api/integreatly/v1alpha1" + "golang.org/x/time/rate" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" + 
"sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/ratelimiter" "sigs.k8s.io/controller-runtime/pkg/source" ) @@ -48,6 +53,7 @@ type SentinelReconciler struct { Log logr.Logger SentinelEvents threads.Manager Metrics threads.Manager + Pool *redis.ServerPool } // +kubebuilder:rbac:groups=saas.3scale.net,namespace=placeholder,resources=sentinels,verbs=get;list;watch;create;update;patch;delete @@ -90,38 +96,28 @@ func (r *SentinelReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{}, err } - // Create the redis-sentinel server pool - sentinelPool, err := redis.NewSentinelPool(ctx, r.Client, - types.NamespacedName{Name: gen.GetComponent(), Namespace: gen.GetNamespace()}, int(*instance.Spec.Replicas)) - - // Close Redis clients - defer sentinelPool.Cleanup(logger) - + clustermap, err := gen.ClusterTopology(ctx) if err != nil { return ctrl.Result{}, err } - - // Create the ShardedCluster objects that represents the redis servers to be monitored by sentinel - shardedCluster, err := redis.NewShardedCluster(ctx, instance.Spec.Config.MonitoredShards, logger) - - // Close Redis clients - defer shardedCluster.Cleanup(logger) - + shardedCluster, err := sharded.NewShardedCluster(ctx, clustermap, r.Pool) if err != nil { return ctrl.Result{}, err } // Ensure all shards are being monitored - allMonitored, err := sentinelPool.IsMonitoringShards(ctx, shardedCluster.GetShardNames()) - if err != nil { - return ctrl.Result{}, err - } - if !allMonitored { - if err := shardedCluster.Discover(ctx, logger); err != nil { + for _, sentinel := range shardedCluster.Sentinels { + allMonitored, err := sentinel.IsMonitoringShards(ctx, shardedCluster.GetShardNames()) + if err != nil { return ctrl.Result{}, err } - if _, err := sentinelPool.Monitor(ctx, shardedCluster); err != nil { - return ctrl.Result{}, err + if !allMonitored { + if err := shardedCluster.Discover(ctx); err != nil { + return ctrl.Result{}, err + } + if _, err := sentinel.Monitor(ctx, shardedCluster); err != nil { + return ctrl.Result{}, err + } } } @@ -129,16 +125,16 @@ func (r *SentinelReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c eventWatchers := make([]threads.RunnableThread, 0, len(gen.SentinelURIs())) metricsGatherers := make([]threads.RunnableThread, 0, len(gen.SentinelURIs())) for _, uri := range gen.SentinelURIs() { - eventWatchers = append(eventWatchers, &events.SentinelEventWatcher{ - Instance: instance, - SentinelURI: uri, - ExportMetrics: true, - Topology: &shardedCluster, - }) - metricsGatherers = append(metricsGatherers, &metrics.SentinelMetricsGatherer{ - RefreshInterval: *gen.Spec.Config.MetricsRefreshInterval, - SentinelURI: uri, - }) + watcher, err := events.NewSentinelEventWatcher(uri, instance, shardedCluster, true, r.Pool) + if err != nil { + return ctrl.Result{}, err + } + gatherer, err := metrics.NewSentinelMetricsGatherer(uri, *gen.Spec.Config.MetricsRefreshInterval, r.Pool) + if err != nil { + return ctrl.Result{}, err + } + eventWatchers = append(eventWatchers, watcher) + metricsGatherers = append(metricsGatherers, gatherer) } if err := r.SentinelEvents.ReconcileThreads(ctx, instance, eventWatchers, logger.WithName("event-watcher")); err != nil { return ctrl.Result{}, err @@ -148,36 +144,58 @@ func (r *SentinelReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c } // Reconcile status of the Sentinel resource - if err := r.reconcileStatus(ctx, instance, 
&gen, sentinelPool, logger); err != nil {
+	if err := r.reconcileStatus(ctx, instance, shardedCluster, logger); err != nil {
		return ctrl.Result{}, err
	}

	return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
}

-func (r *SentinelReconciler) reconcileStatus(ctx context.Context, instance *saasv1alpha1.Sentinel, gen *sentinel.Generator,
-	spool redis.SentinelPool, log logr.Logger) error {
+func (r *SentinelReconciler) reconcileStatus(ctx context.Context, instance *saasv1alpha1.Sentinel, cluster *sharded.Cluster,
+	log logr.Logger) error {

-	monitoredShards, err := spool.MonitoredShards(ctx, saasv1alpha1.SentinelDefaultQuorum, redis.SlaveReadOnlyDiscoveryOpt, redis.SaveConfigDiscoveryOpt)
-	if err != nil {
-		return err
+	// add the sentinel info to the status
+	sentinels := make([]string, len(cluster.Sentinels))
+	for idx, srv := range cluster.Sentinels {
+		sentinels[idx] = srv.ID()
	}

-	replicas := int(*gen.Spec.Replicas)
-	addressList := make([]string, 0, replicas)
+	// add the redis shards info to the status
+	merr := cluster.SentinelDiscover(ctx, sharded.SlaveReadOnlyDiscoveryOpt, sharded.SaveConfigDiscoveryOpt)
+	// If the failure occurred calling sentinel, discard the result and return the error.
+	// Otherwise keep going and use the information that was returned, even if there
+	// were some other errors.
+	sentinelError := &sharded.DiscoveryError_Sentinel_Failure{}
+	if errors.As(merr, sentinelError) {
+		return merr
+	}
+	// We don't want the controller to keep failing while things reconfigure, as
+	// this makes controller throttling kick in. Instead, just log the errors
+	// and rely on reconciles triggered by sentinel events to correct the situation.
+	masterError := &sharded.DiscoveryError_Master_SingleServerFailure{}
+	slaveError := &sharded.DiscoveryError_Slave_SingleServerFailure{}
+	if errors.As(merr, masterError) || errors.As(merr, slaveError) {
+		log.Error(merr, "DiscoveryError")
+	}

-	for i := 0; i < replicas; i++ {
-		key := types.NamespacedName{Name: gen.PodServiceName(i), Namespace: instance.GetNamespace()}
-		svc := &corev1.Service{}
-		if err := r.Client.Get(ctx, key, svc); err != nil {
-			return err
+	shards := make(saasv1alpha1.MonitoredShards, len(cluster.Shards))
+	for idx, shard := range cluster.Shards {
+		shards[idx] = saasv1alpha1.MonitoredShard{
+			Name:    shard.Name,
+			Servers: make(map[string]saasv1alpha1.RedisServerDetails, len(shard.Servers)),
+		}
+		for _, srv := range shard.Servers {
+			shards[idx].Servers[srv.GetAlias()] = saasv1alpha1.RedisServerDetails{
+				Role:    srv.Role,
+				Address: srv.ID(),
+				Config:  srv.Config,
+			}
		}
-		addressList = append(addressList, fmt.Sprintf("%s:%d", svc.Spec.ClusterIP, saasv1alpha1.SentinelPort))
	}

	status := saasv1alpha1.SentinelStatus{
-		Sentinels:       addressList,
-		MonitoredShards: monitoredShards,
+		Sentinels:       sentinels,
+		MonitoredShards: shards,
	}

	if !equality.Semantic.DeepEqual(status, instance.Status) {
@@ -185,6 +203,7 @@
		if err := r.Client.Status().Update(ctx, instance); err != nil {
			return err
		}
+		log.Info("status updated")
	}

	return nil
@@ -200,5 +219,19 @@
		Owns(&grafanav1alpha1.GrafanaDashboard{}).
		Owns(&corev1.ConfigMap{}).
		Watches(&source.Channel{Source: r.SentinelEvents.GetChannel()}, &handler.EnqueueRequestForObject{}).
+		WithOptions(controller.Options{
+			RateLimiter: AggressiveRateLimiter(),
+		}).
		Complete(r)
}
+
+func AggressiveRateLimiter() ratelimiter.RateLimiter {
+	// the default would be: workqueue.DefaultControllerRateLimiter()
+	return workqueue.NewMaxOfRateLimiter(
+		// First retries are more spaced out than the default.
+		// Max retry time is limited to 10 seconds.
+		workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 10*time.Second),
+		// 10 qps, 100 bucket size. This only limits retry speed and it's only the overall factor (not per item).
+		&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
+	)
+}
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
index a8f49072..ed2be372 100644
--- a/controllers/suite_test.go
+++ b/controllers/suite_test.go
@@ -28,6 +28,7 @@ import (
	marin3rv1alpha1 "github.com/3scale-ops/marin3r/apis/marin3r/v1alpha1"
	"github.com/3scale/saas-operator/pkg/reconcilers/threads"
	"github.com/3scale/saas-operator/pkg/reconcilers/workloads"
+	redis "github.com/3scale/saas-operator/pkg/redis/server"
	"github.com/goombaio/namegenerator"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
@@ -113,6 +114,8 @@ var _ = BeforeSuite(func() {
		Expect(err).ToNot(HaveOccurred())
	}()

+	redisPool := redis.NewServerPool()
+
	// Add controllers for testing
	err = (&AutoSSLReconciler{
		WorkloadReconciler: workloads.NewFromManager(mgr),
@@ -153,6 +156,7 @@ var _ = BeforeSuite(func() {
	err = (&RedisShardReconciler{
		Reconciler: basereconciler.NewFromManager(mgr),
		Log:        ctrl.Log.WithName("controllers").WithName("RedisShard"),
+		Pool:       redisPool,
	}).SetupWithManager(mgr)
	Expect(err).ToNot(HaveOccurred())

@@ -160,6 +164,7 @@
		Reconciler:     basereconciler.NewFromManager(mgr),
		SentinelEvents: threads.NewManager(),
		Metrics:        threads.NewManager(),
+		Pool:           redisPool,
		Log:            ctrl.Log.WithName("controllers").WithName("Sentinel"),
	}).SetupWithManager(mgr)
	Expect(err).ToNot(HaveOccurred())
diff --git a/controllers/twemproxyconfig_controller.go b/controllers/twemproxyconfig_controller.go
index cdb751f3..57cdc554 100644
--- a/controllers/twemproxyconfig_controller.go
+++ b/controllers/twemproxyconfig_controller.go
@@ -28,15 +28,18 @@ import (
	"github.com/3scale/saas-operator/pkg/generators/twemproxyconfig"
	"github.com/3scale/saas-operator/pkg/reconcilers/threads"
	"github.com/3scale/saas-operator/pkg/redis/events"
+	redis "github.com/3scale/saas-operator/pkg/redis/server"
	"github.com/3scale/saas-operator/pkg/util"
	"github.com/go-logr/logr"
	grafanav1alpha1 "github.com/grafana-operator/grafana-operator/v4/api/integreatly/v1alpha1"
	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/pointer"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/log"
@@ -48,6 +51,7 @@ type TwemproxyConfigReconciler struct {
	basereconciler.Reconciler
	Log            logr.Logger
	SentinelEvents threads.Manager
+	Pool           *redis.ServerPool
}

// +kubebuilder:rbac:groups=saas.3scale.net,namespace=placeholder,resources=twemproxyconfigs,verbs=get;list;watch;create;update;patch;delete
@@ -77,12 +81,12 @@ func (r *TwemproxyConfigReconciler) Reconcile(ctx context.Context, req ctrl.Requ

	// Generate the ConfigMap
	gen, err := twemproxyconfig.NewGenerator(
-		ctx, instance, r.Client, logger.WithName("generator"),
+		ctx, instance, r.Client, r.Pool, logger.WithName("generator"),
	)
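For context on the `Pool` argument being threaded into the generator here: a single `*redis.ServerPool` is constructed in `main.go` and injected into the Sentinel, RedisShard and TwemproxyConfig reconcilers, presumably so Redis connections are reused across reconciles instead of being opened and torn down each time (note the removed `defer ... Cleanup(logger)` calls in the controllers above). A condensed wiring sketch, lifted from the `main.go` hunk later in this changeset (surrounding manager setup elided):

```go
// condensed from main.go; mgr and setupLog come from the usual
// controller-runtime scaffolding, error handling mostly elided
redisPool := redis.NewServerPool() // redis "github.com/3scale/saas-operator/pkg/redis/server"

if err := (&controllers.TwemproxyConfigReconciler{
	Reconciler:     basereconciler.NewFromManager(mgr),
	SentinelEvents: threads.NewManager(),
	Log:            ctrl.Log.WithName("controllers").WithName("TwemproxyConfig"),
	Pool:           redisPool, // same pool instance shared by all redis-aware controllers
}).SetupWithManager(mgr); err != nil {
	setupLog.Error(err, "unable to create controller", "controller", "TwemproxyConfig")
	os.Exit(1)
}
```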
- if err != nil { return ctrl.Result{}, err } + cm, err := gen.ConfigMap().Build(ctx, r.Client) if err != nil { return ctrl.Result{}, err @@ -105,11 +109,11 @@ func (r *TwemproxyConfigReconciler) Reconcile(ctx context.Context, req ctrl.Requ // Reconcile sentinel event watchers eventWatchers := make([]threads.RunnableThread, 0, len(gen.Spec.SentinelURIs)) for _, uri := range gen.Spec.SentinelURIs { - eventWatchers = append(eventWatchers, &events.SentinelEventWatcher{ - Instance: instance, - SentinelURI: uri, - ExportMetrics: false, - }) + watcher, err := events.NewSentinelEventWatcher(uri, instance, nil, false, r.Pool) + if err != nil { + return ctrl.Result{}, err + } + eventWatchers = append(eventWatchers, watcher) } r.SentinelEvents.ReconcileThreads(ctx, instance, eventWatchers, logger.WithName("event-watcher")) @@ -120,6 +124,11 @@ func (r *TwemproxyConfigReconciler) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{}, err } + // Reconcile status of the TwemproxyConfig resource + if err := r.reconcileStatus(ctx, &gen, instance, logger); err != nil { + return ctrl.Result{}, err + } + // Reconcile periodically in case some event is lost ... return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } @@ -226,6 +235,33 @@ func (r *TwemproxyConfigReconciler) syncPod(ctx context.Context, pod corev1.Pod, } } +func (r *TwemproxyConfigReconciler) reconcileStatus(ctx context.Context, gen *twemproxyconfig.Generator, + instance *saasv1alpha1.TwemproxyConfig, log logr.Logger) error { + selectedTargets := map[string]saasv1alpha1.TargetServer{} + + // The TwemproxyConfig api was initially conceived to support several server pools + // but this is actually not used, so just assume there's only one pool for simplicity + for pshard, server := range gen.GetTargets(gen.Spec.ServerPools[0].Name) { + selectedTargets[pshard] = saasv1alpha1.TargetServer{ + ServerAlias: util.Pointer(server.Alias()), + ServerAddress: server.Address, + } + } + + status := saasv1alpha1.TwemproxyConfigStatus{ + SelectedTargets: selectedTargets, + } + if !equality.Semantic.DeepEqual(status, instance.Status) { + instance.Status = status + if err := r.Client.Status().Update(ctx, instance); err != nil { + return err + } + log.Info("status updated") + } + + return nil +} + // SetupWithManager sets up the controller with the Manager. func (r *TwemproxyConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). @@ -233,5 +269,10 @@ func (r *TwemproxyConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&corev1.ConfigMap{}). Owns(&grafanav1alpha1.GrafanaDashboard{}). Watches(&source.Channel{Source: r.SentinelEvents.GetChannel()}, &handler.EnqueueRequestForObject{}). + WithOptions(controller.Options{ + RateLimiter: AggressiveRateLimiter(), + // this allows for different resources to be reconciled in parallel + MaxConcurrentReconciles: 2, + }). Complete(r) } diff --git a/examples/backend/redis-v4/sentinel.yaml b/examples/backend/redis-v4/sentinel.yaml index a11a0d57..ec0994fb 100644 --- a/examples/backend/redis-v4/sentinel.yaml +++ b/examples/backend/redis-v4/sentinel.yaml @@ -5,15 +5,14 @@ metadata: spec: replicas: 3 config: - monitoredShards: + clusterTopology: # DNS should not be used in production. DNS is - # used here for convenience as redis IPs might change - # inside the cluster. 
+ # just convenient for testing purposes shard01: - - redis://redis-shard-shard01-0.redis-shard-shard01:6379 - - redis://redis-shard-shard01-1.redis-shard-shard01:6379 - - redis://redis-shard-shard01-2.redis-shard-shard01:6379 + shard01-0: redis://redis-shard-shard01-0.redis-shard-shard01:6379 + shard01-1: redis://redis-shard-shard01-1.redis-shard-shard01:6379 + shard01-2: redis://redis-shard-shard01-2.redis-shard-shard01:6379 shard02: - - redis://redis-shard-shard02-0.redis-shard-shard02:6379 - - redis://redis-shard-shard02-1.redis-shard-shard02:6379 - - redis://redis-shard-shard02-2.redis-shard-shard02:6379 + shard02-0: redis://redis-shard-shard02-0.redis-shard-shard02:6379 + shard02-1: redis://redis-shard-shard02-1.redis-shard-shard02:6379 + shard02-2: redis://redis-shard-shard02-2.redis-shard-shard02:6379 diff --git a/go.mod b/go.mod index 86bbbab3..154abc2b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/3scale/saas-operator -go 1.19 +go 1.20 require ( github.com/3scale-ops/basereconciler v0.1.0 @@ -24,6 +24,7 @@ require ( github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.42.1 github.com/prometheus/client_golang v1.14.0 go.uber.org/zap v1.24.0 + golang.org/x/time v0.3.0 google.golang.org/protobuf v1.29.1 k8s.io/api v0.26.2 k8s.io/apimachinery v0.26.2 @@ -79,7 +80,6 @@ require ( golang.org/x/sys v0.6.0 // indirect golang.org/x/term v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect - golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.7.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/main.go b/main.go index effdc16c..730692f3 100644 --- a/main.go +++ b/main.go @@ -34,6 +34,7 @@ import ( "github.com/3scale/saas-operator/controllers" "github.com/3scale/saas-operator/pkg/reconcilers/threads" "github.com/3scale/saas-operator/pkg/reconcilers/workloads" + redis "github.com/3scale/saas-operator/pkg/redis/server" "github.com/3scale/saas-operator/pkg/util" "github.com/3scale/saas-operator/pkg/version" externalsecretsv1beta1 "github.com/external-secrets/external-secrets/apis/externalsecrets/v1beta1" @@ -54,7 +55,6 @@ const ( // which specifies the Namespace to watch. // An empty value means the operator is running with cluster scope. 
watchNamespaceEnvVar string = "WATCH_NAMESPACE" - debugLevelEnvVar string = "DEBUG_LEVEL" ) var ( @@ -127,11 +127,13 @@ func main() { /* BASERECONCILER_V2 BASED CONTROLLERS*/ + redisPool := redis.NewServerPool() if err = (&controllers.SentinelReconciler{ Reconciler: basereconciler.NewFromManager(mgr), SentinelEvents: threads.NewManager(), Metrics: threads.NewManager(), Log: ctrl.Log.WithName("controllers").WithName("Sentinel"), + Pool: redisPool, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Sentinel") os.Exit(1) @@ -139,6 +141,7 @@ func main() { if err = (&controllers.RedisShardReconciler{ Reconciler: basereconciler.NewFromManager(mgr), Log: ctrl.Log.WithName("controllers").WithName("RedisShard"), + Pool: redisPool, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RedisShard") os.Exit(1) @@ -147,6 +150,7 @@ func main() { Reconciler: basereconciler.NewFromManager(mgr), SentinelEvents: threads.NewManager(), Log: ctrl.Log.WithName("controllers").WithName("TwemproxyConfig"), + Pool: redisPool, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "TwemproxyConfig") os.Exit(1) diff --git a/pkg/assets/bindata.go b/pkg/assets/bindata.go index c78b1d7b..5bc7172d 100644 --- a/pkg/assets/bindata.go +++ b/pkg/assets/bindata.go @@ -101,7 +101,7 @@ func dashboardsApicastServicesJsonGtpl() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "dashboards/apicast-services.json.gtpl", size: 17460, mode: os.FileMode(436), modTime: time.Unix(1679645064, 0)} + info := bindataFileInfo{name: "dashboards/apicast-services.json.gtpl", size: 17460, mode: os.FileMode(420), modTime: time.Unix(1679676915, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -121,7 +121,7 @@ func dashboardsApicastJsonGtpl() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "dashboards/apicast.json.gtpl", size: 84552, mode: os.FileMode(436), modTime: time.Unix(1679645064, 0)} + info := bindataFileInfo{name: "dashboards/apicast.json.gtpl", size: 84552, mode: os.FileMode(420), modTime: time.Unix(1679676915, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -141,7 +141,7 @@ func dashboardsAutosslJsonGtpl() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "dashboards/autossl.json.gtpl", size: 59366, mode: os.FileMode(436), modTime: time.Unix(1679645064, 0)} + info := bindataFileInfo{name: "dashboards/autossl.json.gtpl", size: 59366, mode: os.FileMode(420), modTime: time.Unix(1679676915, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -161,7 +161,7 @@ func dashboardsBackendJsonGtpl() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "dashboards/backend.json.gtpl", size: 122812, mode: os.FileMode(436), modTime: time.Unix(1679645064, 0)} + info := bindataFileInfo{name: "dashboards/backend.json.gtpl", size: 122812, mode: os.FileMode(420), modTime: time.Unix(1679676915, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -181,7 +181,7 @@ func dashboardsCorsProxyJsonGtpl() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "dashboards/cors-proxy.json.gtpl", size: 78737, mode: os.FileMode(436), modTime: time.Unix(1679645064, 0)} + info := bindataFileInfo{name: "dashboards/cors-proxy.json.gtpl", size: 78737, mode: os.FileMode(420), modTime: time.Unix(1679676915, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -201,7 +201,7 @@ func 
dashboardsMappingServiceJsonGtpl() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "dashboards/mapping-service.json.gtpl", size: 70528, mode: os.FileMode(436), modTime: time.Unix(1679645064, 0)} + info := bindataFileInfo{name: "dashboards/mapping-service.json.gtpl", size: 70528, mode: os.FileMode(420), modTime: time.Unix(1679676915, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -221,7 +221,7 @@ func dashboardsRedisSentinelJsonGtpl() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "dashboards/redis-sentinel.json.gtpl", size: 131661, mode: os.FileMode(436), modTime: time.Unix(1679645064, 0)} + info := bindataFileInfo{name: "dashboards/redis-sentinel.json.gtpl", size: 131661, mode: os.FileMode(420), modTime: time.Unix(1679676915, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -241,7 +241,7 @@ func dashboardsSystemJsonGtpl() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "dashboards/system.json.gtpl", size: 81338, mode: os.FileMode(436), modTime: time.Unix(1679645064, 0)} + info := bindataFileInfo{name: "dashboards/system.json.gtpl", size: 81338, mode: os.FileMode(420), modTime: time.Unix(1679676915, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -261,7 +261,7 @@ func dashboardsTwemproxyJsonGtpl() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "dashboards/twemproxy.json.gtpl", size: 130875, mode: os.FileMode(436), modTime: time.Unix(1679645064, 0)} + info := bindataFileInfo{name: "dashboards/twemproxy.json.gtpl", size: 130875, mode: os.FileMode(420), modTime: time.Unix(1679676915, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -281,7 +281,7 @@ func dashboardsZyncJsonGtpl() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "dashboards/zync.json.gtpl", size: 70304, mode: os.FileMode(436), modTime: time.Unix(1679645064, 0)} + info := bindataFileInfo{name: "dashboards/zync.json.gtpl", size: 70304, mode: os.FileMode(420), modTime: time.Unix(1679676915, 0)} a := &asset{bytes: bytes, info: info} return a, nil } diff --git a/pkg/generators/redisshard/statefulset.go b/pkg/generators/redisshard/statefulset.go index 48f32477..d2850bb7 100644 --- a/pkg/generators/redisshard/statefulset.go +++ b/pkg/generators/redisshard/statefulset.go @@ -40,6 +40,7 @@ func (gen *Generator) statefulSet() func() *appsv1.StatefulSet { } return nil }(), + RestartPolicy: corev1.RestartPolicyAlways, Containers: []corev1.Container{ { Command: []string{"redis-server", "/redis/redis.conf"}, diff --git a/pkg/generators/sentinel/generator.go b/pkg/generators/sentinel/generator.go index 5cc7282f..02d05aa1 100644 --- a/pkg/generators/sentinel/generator.go +++ b/pkg/generators/sentinel/generator.go @@ -1,6 +1,12 @@ package sentinel import ( + "context" + "fmt" + "net" + "net/url" + "strings" + basereconciler "github.com/3scale-ops/basereconciler/reconciler" basereconciler_resources "github.com/3scale-ops/basereconciler/resources" saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1" @@ -8,6 +14,7 @@ import ( "github.com/3scale/saas-operator/pkg/generators/sentinel/config" "github.com/3scale/saas-operator/pkg/resource_builders/grafanadashboard" "github.com/3scale/saas-operator/pkg/resource_builders/pdb" + "github.com/3scale/saas-operator/pkg/util" ) const ( @@ -71,3 +78,73 @@ func (gen *Generator) Resources() []basereconciler.Resource { return resources } + +func (gen *Generator) ClusterTopology(ctx context.Context) (map[string]map[string]string, error) { + + clustermap := 
map[string]map[string]string{}
+
+	if gen.Spec.Config.ClusterTopology != nil {
+		for shard, serversdef := range gen.Spec.Config.ClusterTopology {
+			shardmap := map[string]string{}
+			for alias, server := range serversdef {
+				// the redis servers must be defined using IP
+				// addresses, so this tries to resolve a hostname
+				// if present in the connection string.
+				u, err := url.Parse(server)
+				if err != nil {
+					return nil, err
+				}
+				ip, err := util.LookupIPv4(ctx, u.Hostname())
+				if err != nil {
+					return nil, err
+				}
+				u.Host = net.JoinHostPort(ip, u.Port())
+				shardmap[alias] = u.String()
+			}
+			clustermap[shard] = shardmap
+		}
+
+	} else if gen.Spec.Config.MonitoredShards != nil {
+		for shard, servers := range gen.Spec.Config.MonitoredShards {
+			shardmap := map[string]string{}
+			for _, server := range servers {
+				// the redis servers must be defined using IP
+				// addresses, so this tries to resolve a hostname
+				// if present in the connection string.
+				u, err := url.Parse(server)
+				if err != nil {
+					return nil, err
+				}
+				alias := u.Host
+				ip, err := util.LookupIPv4(ctx, u.Hostname())
+				if err != nil {
+					return nil, err
+				}
+				u.Host = net.JoinHostPort(ip, u.Port())
+				shardmap[alias] = u.String()
+			}
+			clustermap[shard] = shardmap
+		}
+
+	} else {
+		return nil, fmt.Errorf("either 'spec.config.clusterTopology' or 'spec.config.monitoredShards' must be set")
+	}
+
+	clustermap["sentinel"] = make(map[string]string, int(*gen.Spec.Replicas))
+	for _, uri := range gen.SentinelURIs() {
+		u, err := url.Parse(uri)
+		if err != nil {
+			return nil, err
+		}
+		alias := strings.Split(u.Hostname(), ".")[0]
+		clustermap["sentinel"][alias] = u.String()
+	}
+
+	return clustermap, nil
+}
diff --git a/pkg/generators/sentinel/generator_test.go b/pkg/generators/sentinel/generator_test.go
new file mode 100644
index 00000000..85ab788c
--- /dev/null
+++ b/pkg/generators/sentinel/generator_test.go
@@ -0,0 +1,120 @@
+package sentinel
+
+import (
+	"context"
+	"testing"
+
+	saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1"
+	"github.com/3scale/saas-operator/pkg/util"
+	"github.com/go-test/deep"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+func TestGenerator_ClusterTopology(t *testing.T) {
+	type args struct {
+		ctx context.Context
+	}
+	tests := []struct {
+		name    string
+		key     types.NamespacedName
+		spec    saasv1alpha1.SentinelSpec
+		args    args
+		want    map[string]map[string]string
+		wantErr bool
+	}{
+		{
+			name: "Generates a correct cluster topology from 'spec.config.monitoredShards'",
+			key:  types.NamespacedName{Name: "test", Namespace: "test"},
+			spec: saasv1alpha1.SentinelSpec{
+				Replicas: util.Pointer(int32(3)),
+				Config: &saasv1alpha1.SentinelConfig{
+					MonitoredShards: map[string][]string{
+						"shard01": {
+							"redis://localhost:1000",
+							"redis://localhost:2000",
+							"redis://localhost:3000",
+						},
+						"shard02": {
+							"redis://localhost:4000",
+							"redis://localhost:5000",
+							"redis://localhost:6000",
+						}}},
+			},
+			args: args{
+				ctx: context.TODO(),
+			},
+			want: map[string]map[string]string{
+				"shard01": {
+					"localhost:1000": "redis://127.0.0.1:1000",
+					"localhost:2000": "redis://127.0.0.1:2000",
+					"localhost:3000": "redis://127.0.0.1:3000",
+				},
+				"shard02": {
+					"localhost:4000": "redis://127.0.0.1:4000",
+					"localhost:5000": "redis://127.0.0.1:5000",
+					"localhost:6000": "redis://127.0.0.1:6000",
+				},
+				"sentinel": {
+					"redis-sentinel-0": "redis://redis-sentinel-0.test.svc.cluster.local:26379",
+					"redis-sentinel-1":
"redis://redis-sentinel-1.test.svc.cluster.local:26379", + "redis-sentinel-2": "redis://redis-sentinel-2.test.svc.cluster.local:26379", + }, + }, + wantErr: false, + }, + { + name: "Generates a correct cluster topology from 'spec.config.clusterTopology'", + key: types.NamespacedName{Name: "test", Namespace: "test"}, + spec: saasv1alpha1.SentinelSpec{ + Replicas: util.Pointer(int32(3)), + Config: &saasv1alpha1.SentinelConfig{ + ClusterTopology: map[string]map[string]string{ + "shard01": { + "srv1": "redis://localhost:1000", + "srv2": "redis://localhost:2000", + "srv3": "redis://localhost:3000", + }, + "shard02": { + "srv4": "redis://localhost:4000", + "srv5": "redis://localhost:5000", + "srv6": "redis://localhost:6000", + }}}, + }, + args: args{ + ctx: context.TODO(), + }, + want: map[string]map[string]string{ + "shard01": { + "srv1": "redis://127.0.0.1:1000", + "srv2": "redis://127.0.0.1:2000", + "srv3": "redis://127.0.0.1:3000", + }, + "shard02": { + "srv4": "redis://127.0.0.1:4000", + "srv5": "redis://127.0.0.1:5000", + "srv6": "redis://127.0.0.1:6000", + }, + "sentinel": { + "redis-sentinel-0": "redis://redis-sentinel-0.test.svc.cluster.local:26379", + "redis-sentinel-1": "redis://redis-sentinel-1.test.svc.cluster.local:26379", + "redis-sentinel-2": "redis://redis-sentinel-2.test.svc.cluster.local:26379", + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + gen := NewGenerator("test", "test", tt.spec) + got, err := gen.ClusterTopology(tt.args.ctx) + if (err != nil) != tt.wantErr { + t.Errorf("Generator.ClusterTopology() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := deep.Equal(got, tt.want); len(diff) > 0 { + t.Errorf("Generator.ClusterTopology() = got diff %v", diff) + } + }) + } +} diff --git a/pkg/generators/twemproxyconfig/generator.go b/pkg/generators/twemproxyconfig/generator.go index 8d21909f..8b2fac3b 100644 --- a/pkg/generators/twemproxyconfig/generator.go +++ b/pkg/generators/twemproxyconfig/generator.go @@ -2,14 +2,17 @@ package twemproxyconfig import ( "context" + "errors" "fmt" - "sort" + "net/url" + "strings" basereconciler "github.com/3scale-ops/basereconciler/reconciler" basereconciler_resources "github.com/3scale-ops/basereconciler/resources" saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1" "github.com/3scale/saas-operator/pkg/generators" - "github.com/3scale/saas-operator/pkg/redis" + "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/3scale/saas-operator/pkg/redis/sharded" "github.com/3scale/saas-operator/pkg/resource_builders/grafanadashboard" "github.com/3scale/saas-operator/pkg/resource_builders/twemproxy" "github.com/go-logr/logr" @@ -48,7 +51,8 @@ type Generator struct { } // NewGenerator returns a new Options struct -func NewGenerator(ctx context.Context, instance *saasv1alpha1.TwemproxyConfig, cl client.Client, log logr.Logger) (Generator, error) { +func NewGenerator(ctx context.Context, instance *saasv1alpha1.TwemproxyConfig, cl client.Client, + pool *server.ServerPool, log logr.Logger) (Generator, error) { gen := Generator{ BaseOptionsV2: generators.BaseOptionsV2{ @@ -71,9 +75,18 @@ func NewGenerator(ctx context.Context, instance *saasv1alpha1.TwemproxyConfig, c } } - gen.masterTargets, err = gen.getMonitoredMasters( - ctx, log.WithName("masterTargets"), - ) + clustermap := map[string]map[string]string{} + clustermap["sentinel"] = make(map[string]string, len(gen.Spec.SentinelURIs)) + for _, uri := range gen.Spec.SentinelURIs { + u, err := url.Parse(uri) + if 
err != nil { + return Generator{}, err + } + alias := strings.Split(u.Hostname(), ".")[0] + clustermap["sentinel"][alias] = u.String() + } + + shardedCluster, err := sharded.NewShardedCluster(ctx, clustermap, pool) if err != nil { return Generator{}, err } @@ -85,9 +98,38 @@ func NewGenerator(ctx context.Context, instance *saasv1alpha1.TwemproxyConfig, c } } - if discoverSlavesRW { + + switch discoverSlavesRW { + + case false: + // any error discovering masters should be returned + if merr := shardedCluster.SentinelDiscover(ctx, sharded.OnlyMasterDiscoveryOpt); merr != nil { + return Generator{}, merr + } + gen.masterTargets, err = gen.getMonitoredMasters(ctx, shardedCluster, log.WithName("masterTargets")) + if err != nil { + return Generator{}, err + } + + case true: + merr := shardedCluster.SentinelDiscover(ctx, sharded.SlaveReadOnlyDiscoveryOpt) + if merr != nil { + log.Error(merr, "DiscoveryError") + // Only sentinel/master discovery errors should return. + // Slave failures just fail over to the master without returning an error (although the failure is logged) + sentinelError := &sharded.DiscoveryError_Sentinel_Failure{} + masterError := &sharded.DiscoveryError_Master_SingleServerFailure{} + if errors.As(merr, sentinelError) || errors.As(merr, masterError) { + return Generator{}, merr + } + } + + gen.masterTargets, err = gen.getMonitoredMasters(ctx, shardedCluster, log.WithName("masterTargets")) + if err != nil { + return Generator{}, err + } gen.slaverwTargets, err = gen.getMonitoredReadWriteSlavesWithFallbackToMasters( - ctx, log.WithName("slaverwTargets"), + ctx, shardedCluster, log.WithName("slaverwTargets"), ) if err != nil { return Generator{}, err @@ -97,6 +139,19 @@ func NewGenerator(ctx context.Context, instance *saasv1alpha1.TwemproxyConfig, c return gen, nil } +func (gen *Generator) GetTargets(poolName string) map[string]twemproxy.Server { + for _, pool := range gen.Spec.ServerPools { + if pool.Name == poolName { + if *pool.Target == saasv1alpha1.Masters { + return gen.masterTargets + } else { + return gen.slaverwTargets + } + } + } + return nil +} + func discoverSentinels(ctx context.Context, cl client.Client, namespace string) ([]string, error) { sl := &saasv1alpha1.SentinelList{} if err := cl.List(ctx, sl, client.InNamespace(namespace)); err != nil { @@ -115,92 +170,43 @@ func discoverSentinels(ctx context.Context, cl client.Client, namespace string) return uris, nil } -func (gen *Generator) getMonitoredMasters(ctx context.Context, log logr.Logger) (map[string]twemproxy.Server, error) { - - spool := make(redis.SentinelPool, 0, len(gen.Spec.SentinelURIs)) +func (gen *Generator) getMonitoredMasters(ctx context.Context, + cluster *sharded.Cluster, log logr.Logger) (map[string]twemproxy.Server, error) { - for _, uri := range gen.Spec.SentinelURIs { - sentinel, err := redis.NewSentinelServerFromConnectionString("sentinel", uri) - defer sentinel.Cleanup(log) - if err != nil { - return nil, err - } - - spool = append(spool, *sentinel) - } - - monitoredShards, err := spool.MonitoredShards(ctx, 2, redis.OnlyMasterDiscoveryOpt) - if err != nil { - return nil, err - } - - m := make(map[string]twemproxy.Server, len(monitoredShards)) - for _, shard := range monitoredShards { - masterAddress, _, err := shard.GetMaster() + m := make(map[string]twemproxy.Server, len(cluster.Shards)) + for _, shard := range cluster.Shards { + master, err := shard.GetMaster() if err != nil { return nil, err } - m[shard.Name] = twemproxy.Server{ - Name: shard.Name, - Address: masterAddress, - Priority: 1, - } + m[shard.Name] = twemproxy.NewServer(master.ID(), master.GetAlias()) } return m, nil } -func (gen *Generator) getMonitoredReadWriteSlavesWithFallbackToMasters(ctx context.Context, log logr.Logger) (map[string]twemproxy.Server, error) { +func (gen *Generator) getMonitoredReadWriteSlavesWithFallbackToMasters(ctx context.Context, + cluster *sharded.Cluster, log logr.Logger) (map[string]twemproxy.Server, error) { - spool := make(redis.SentinelPool, 0, len(gen.Spec.SentinelURIs)) - - for _, uri := range gen.Spec.SentinelURIs { - sentinel, err := redis.NewSentinelServerFromConnectionString("sentinel", uri) - defer sentinel.Cleanup(log) - if err != nil { - return nil, err - } - - spool = append(spool, *sentinel) - } - - monitoredShards, err := spool.MonitoredShards(ctx, 2, redis.SlaveReadOnlyDiscoveryOpt) - if err != nil { - return nil, err - } - - m := make(map[string]twemproxy.Server, len(monitoredShards)) - for _, shard := range monitoredShards { + m := make(map[string]twemproxy.Server, len(cluster.Shards)) + for _, shard := range cluster.Shards { if slavesRW := shard.GetSlavesRW(); len(slavesRW) > 0 { - // In the (unlikely) case that there are more than 1 slaveRW - // we need to consistenly choose the same in all reconcile loops, otherwise - // we would be forcing twemproxy restart if we are constantly changing the chosen server. - // Due to the lack of a better criteria, we just choose the server address that scores - // lowest in alphabetical order. - var address []string - for k := range slavesRW { - address = append(address, k) - } - sort.Strings(address) - m[shard.Name] = twemproxy.Server{ - Name: shard.Name, - Address: address[0], - Priority: 1, - } + m[shard.Name] = twemproxy.NewServer(slavesRW[0].ID(), slavesRW[0].GetAlias()) slaveRwConfigured.With(prometheus.Labels{"twemproxy_config": gen.InstanceName, "shard": shard.Name}).Set(1) + } else { - // Fall back to masters if there are no - // available RW slaves + // Fall back to the master if there are no - // available RW slaves for this shard master, err := shard.GetMaster() if err != nil { return nil, err } - m[shard.Name] = twemproxy.Server{ - Name: shard.Name, - Address: masterAddress, - Priority: 1, - } + m[shard.Name] = twemproxy.NewServer(master.ID(), master.GetAlias()) slaveRwConfigured.With(prometheus.Labels{"twemproxy_config": gen.InstanceName, "shard": shard.Name}).Set(0) } } diff --git a/pkg/generators/twemproxyconfig/generator_test.go b/pkg/generators/twemproxyconfig/generator_test.go new file mode 100644 index 00000000..db8f7eef --- /dev/null +++ b/pkg/generators/twemproxyconfig/generator_test.go @@ -0,0 +1,644 @@ +package twemproxyconfig + +import ( + "context" + "fmt" + "testing" + + saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1" + "github.com/3scale/saas-operator/pkg/generators" + redis_client "github.com/3scale/saas-operator/pkg/redis/client" + "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/3scale/saas-operator/pkg/resource_builders/twemproxy" + "github.com/3scale/saas-operator/pkg/util" + "github.com/go-logr/logr" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestNewGenerator(t *testing.T) { + type args struct { + ctx context.Context + instance *saasv1alpha1.TwemproxyConfig + cl client.Client + pool *server.ServerPool + log logr.Logger + } + 
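// A compact sketch of the target-selection policy that
// getMonitoredReadWriteSlavesWithFallbackToMasters implements above: prefer the
// first discovered read-write slave and fall back to the shard master when none
// is available. The types are simplified stand-ins for sharded.Shard and
// twemproxy.Server (assumed here for illustration, not the operator's API).
package main

import "fmt"

type server struct{ id, alias string }

type shard struct {
	name     string
	master   server
	slavesRW []server // read-write slaves found during discovery; may be empty
}

// pickTarget returns the chosen backend and whether a RW slave was used,
// mirroring the slaveRwConfigured gauge being set to 1 or 0.
func pickTarget(s shard) (server, bool) {
	if len(s.slavesRW) > 0 {
		return s.slavesRW[0], true
	}
	return s.master, false // slave down or read-only: traffic goes to the master
}

func main() {
	s := shard{name: "shard0", master: server{id: "127.0.0.1:1000"}}
	target, viaSlave := pickTarget(s)
	fmt.Println(s.name, target.id, viaSlave) // shard0 127.0.0.1:1000 false
}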
tests := []struct { + name string + args args + want Generator + wantErr bool + }{ + { + name: "Populates the generation with the current cluster topology (target masters)", + args: args{ + ctx: context.TODO(), + instance: &saasv1alpha1.TwemproxyConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "test"}, + Spec: saasv1alpha1.TwemproxyConfigSpec{ + SentinelURIs: []string{"redis://127.0.0.1:26379"}, + ServerPools: []saasv1alpha1.TwemproxyServerPool{{ + Name: "test-pool", + Target: util.Pointer(saasv1alpha1.Masters), + Topology: []saasv1alpha1.ShardedRedisTopology{ + {ShardName: "l-shard00", PhysicalShard: "shard0"}, + {ShardName: "l-shard01", PhysicalShard: "shard0"}, + {ShardName: "l-shard02", PhysicalShard: "shard0"}, + {ShardName: "l-shard03", PhysicalShard: "shard1"}, + {ShardName: "l-shard04", PhysicalShard: "shard1"}, + }, + BindAddress: "0.0.0.0:22121", + Timeout: 5000, + TCPBacklog: 512, + PreConnect: false, + }}, + ReconcileServerPools: util.Pointer(true), + }, + }, + cl: nil, + pool: server.NewServerPool( + // redis servers + server.NewFakeServerWithFakeClient("127.0.0.1", "1000", redis_client.NewPredefinedRedisFakeResponse("role-master", nil)), + server.NewFakeServerWithFakeClient("127.0.0.1", "2000", redis_client.NewPredefinedRedisFakeResponse("role-slave", nil)), + server.NewFakeServerWithFakeClient("127.0.0.1", "3000", redis_client.NewPredefinedRedisFakeResponse("role-slave", nil)), + server.NewFakeServerWithFakeClient("127.0.0.1", "4000", redis_client.NewPredefinedRedisFakeResponse("role-slave", nil)), + server.NewFakeServerWithFakeClient("127.0.0.1", "5000", redis_client.NewPredefinedRedisFakeResponse("role-master", nil)), + server.NewFakeServerWithFakeClient("127.0.0.1", "6000", redis_client.NewPredefinedRedisFakeResponse("role-slave", nil)), + // sentinel + server.NewFakeServerWithFakeClient("127.0.0.1", "26379", + redis_client.FakeResponse{ + // cmd: Ping + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelMasters() + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{"name", "shard0", "ip", "127.0.0.1", "port", "1000"}, + []interface{}{"name", "shard1", "ip", "127.0.0.1", "port", "5000"}, + } + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelMaster (shard0) + InjectResponse: func() interface{} { + return &redis_client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelMaster (shard1) + InjectResponse: func() interface{} { + return &redis_client.SentinelMasterCmdResult{Name: "shard1", IP: "127.0.0.1", Port: 5000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + ), + ), + log: logr.Discard(), + }, + want: Generator{ + BaseOptionsV2: generators.BaseOptionsV2{ + Component: "twemproxy", + InstanceName: "test", + Namespace: "test", + Labels: map[string]string{ + "app": component, + "part-of": "3scale-saas", + }}, + Spec: saasv1alpha1.TwemproxyConfigSpec{ + SentinelURIs: []string{"redis://127.0.0.1:26379"}, + ServerPools: []saasv1alpha1.TwemproxyServerPool{{ + Name: "test-pool", + Target: util.Pointer(saasv1alpha1.Masters), + Topology: []saasv1alpha1.ShardedRedisTopology{ + {ShardName: "l-shard00", PhysicalShard: "shard0"}, + {ShardName: "l-shard01", PhysicalShard: "shard0"}, + {ShardName: "l-shard02", PhysicalShard: "shard0"}, + 
{ShardName: "l-shard03", PhysicalShard: "shard1"}, + {ShardName: "l-shard04", PhysicalShard: "shard1"}, + }, + BindAddress: "0.0.0.0:22121", + Timeout: 5000, + TCPBacklog: 512, + PreConnect: false, + }}, + ReconcileServerPools: util.Pointer(true), + }, + masterTargets: map[string]twemproxy.Server{ + "shard0": { + Address: "127.0.0.1:1000", + Priority: 1, + }, + "shard1": { + Address: "127.0.0.1:5000", + Priority: 1, + }, + }, + slaverwTargets: nil, + }, + wantErr: false, + }, + { + name: "Returns error (target masters)", + args: args{ + ctx: context.TODO(), + instance: &saasv1alpha1.TwemproxyConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "test"}, + Spec: saasv1alpha1.TwemproxyConfigSpec{ + SentinelURIs: []string{"redis://127.0.0.1:26379"}, + ServerPools: []saasv1alpha1.TwemproxyServerPool{{ + Name: "test-pool", + Target: util.Pointer(saasv1alpha1.Masters), + Topology: []saasv1alpha1.ShardedRedisTopology{ + {ShardName: "l-shard00", PhysicalShard: "shard0"}, + {ShardName: "l-shard01", PhysicalShard: "shard0"}, + {ShardName: "l-shard02", PhysicalShard: "shard0"}, + {ShardName: "l-shard03", PhysicalShard: "shard1"}, + {ShardName: "l-shard04", PhysicalShard: "shard1"}, + }, + BindAddress: "0.0.0.0:22121", + Timeout: 5000, + TCPBacklog: 512, + PreConnect: false, + }}, + ReconcileServerPools: util.Pointer(true), + }, + }, + cl: nil, + pool: server.NewServerPool( + // sentinel + server.NewFakeServerWithFakeClient("127.0.0.1", "26379", + redis_client.FakeResponse{ + // cmd: Ping + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return fmt.Errorf("error") }, + }, + ), + ), + log: logr.Discard(), + }, + want: Generator{}, + wantErr: true, + }, + { + name: "Populates the generation with the current cluster topology (target rw slaves)", + args: args{ + ctx: context.TODO(), + instance: &saasv1alpha1.TwemproxyConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "test"}, + Spec: saasv1alpha1.TwemproxyConfigSpec{ + SentinelURIs: []string{"redis://127.0.0.1:26379"}, + ServerPools: []saasv1alpha1.TwemproxyServerPool{{ + Name: "test-pool", + Target: util.Pointer(saasv1alpha1.SlavesRW), + Topology: []saasv1alpha1.ShardedRedisTopology{ + {ShardName: "l-shard00", PhysicalShard: "shard0"}, + {ShardName: "l-shard01", PhysicalShard: "shard0"}, + {ShardName: "l-shard02", PhysicalShard: "shard0"}, + {ShardName: "l-shard03", PhysicalShard: "shard1"}, + {ShardName: "l-shard04", PhysicalShard: "shard1"}, + }, + BindAddress: "0.0.0.0:22121", + Timeout: 5000, + TCPBacklog: 512, + PreConnect: false, + }}, + ReconcileServerPools: util.Pointer(true), + }, + }, + cl: nil, + pool: server.NewServerPool( + // redis servers + server.NewFakeServerWithFakeClient("127.0.0.1", "1000", redis_client.NewPredefinedRedisFakeResponse("role-master", nil)), + server.NewFakeServerWithFakeClient("127.0.0.1", "2000", + redis_client.NewPredefinedRedisFakeResponse("role-slave", nil), + redis_client.NewPredefinedRedisFakeResponse("slave-read-only-no", nil), + ), + server.NewFakeServerWithFakeClient("127.0.0.1", "3000", + redis_client.NewPredefinedRedisFakeResponse("role-slave", nil), + redis_client.NewPredefinedRedisFakeResponse("slave-read-only-yes", nil), + ), + server.NewFakeServerWithFakeClient("127.0.0.1", "4000", + redis_client.NewPredefinedRedisFakeResponse("role-slave", nil), + redis_client.NewPredefinedRedisFakeResponse("slave-read-only-no", nil), + ), + server.NewFakeServerWithFakeClient("127.0.0.1", "5000", redis_client.NewPredefinedRedisFakeResponse("role-master", nil)), + 
server.NewFakeServerWithFakeClient("127.0.0.1", "6000", + redis_client.NewPredefinedRedisFakeResponse("role-slave", nil), + redis_client.NewPredefinedRedisFakeResponse("slave-read-only-yes", nil), + ), + // sentinel + server.NewFakeServerWithFakeClient("127.0.0.1", "26379", + redis_client.FakeResponse{ + // cmd: Ping + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelMasters() + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{"name", "shard0", "ip", "127.0.0.1", "port", "1000"}, + []interface{}{"name", "shard1", "ip", "127.0.0.1", "port", "5000"}, + } + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelMaster (shard0) + InjectResponse: func() interface{} { + return &redis_client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelSlaves (shard0) + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:2000", + "ip", "127.0.0.1", + "port", "2000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:3000", + "ip", "127.0.0.1", + "port", "3000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelMaster (shard1) + InjectResponse: func() interface{} { + return &redis_client.SentinelMasterCmdResult{Name: "shard1", IP: "127.0.0.1", Port: 5000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelSlaves (shard1) + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:4000", + "ip", "127.0.0.1", + "port", "4000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:6000", + "ip", "127.0.0.1", + "port", "6000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + ), + ), + log: logr.Discard(), + }, + want: Generator{ + BaseOptionsV2: generators.BaseOptionsV2{ + Component: "twemproxy", + InstanceName: "test", + Namespace: "test", + Labels: map[string]string{ + "app": component, + "part-of": "3scale-saas", + }}, + Spec: saasv1alpha1.TwemproxyConfigSpec{ + SentinelURIs: []string{"redis://127.0.0.1:26379"}, + ServerPools: []saasv1alpha1.TwemproxyServerPool{{ + Name: "test-pool", + Target: util.Pointer(saasv1alpha1.SlavesRW), + Topology: []saasv1alpha1.ShardedRedisTopology{ + {ShardName: "l-shard00", PhysicalShard: "shard0"}, + {ShardName: "l-shard01", PhysicalShard: "shard0"}, + {ShardName: "l-shard02", PhysicalShard: "shard0"}, + {ShardName: "l-shard03", PhysicalShard: "shard1"}, + {ShardName: "l-shard04", PhysicalShard: "shard1"}, + }, + BindAddress: "0.0.0.0:22121", + Timeout: 5000, + TCPBacklog: 512, + PreConnect: false, + }}, + ReconcileServerPools: util.Pointer(true), + }, + masterTargets: map[string]twemproxy.Server{ + "shard0": { + Address: "127.0.0.1:1000", + Priority: 1, + }, + "shard1": { + Address: "127.0.0.1:5000", + Priority: 1, + }, + }, + slaverwTargets: map[string]twemproxy.Server{ + "shard0": { + Address: "127.0.0.1:2000", + Priority: 1, + }, + "shard1": { + Address: "127.0.0.1:4000", + Priority: 1, + }, + }, + }, + wantErr: false, + }, + { + name: "Populates the generation with the current cluster topology (target rw slaves with failover to master)", + args: args{ + ctx: context.TODO(), + instance: 
&saasv1alpha1.TwemproxyConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "test"}, + Spec: saasv1alpha1.TwemproxyConfigSpec{ + SentinelURIs: []string{"redis://127.0.0.1:26379"}, + ServerPools: []saasv1alpha1.TwemproxyServerPool{{ + Name: "test-pool", + Target: util.Pointer(saasv1alpha1.SlavesRW), + Topology: []saasv1alpha1.ShardedRedisTopology{ + {ShardName: "l-shard00", PhysicalShard: "shard0"}, + {ShardName: "l-shard01", PhysicalShard: "shard0"}, + {ShardName: "l-shard02", PhysicalShard: "shard0"}, + {ShardName: "l-shard03", PhysicalShard: "shard1"}, + {ShardName: "l-shard04", PhysicalShard: "shard1"}, + }, + BindAddress: "0.0.0.0:22121", + Timeout: 5000, + TCPBacklog: 512, + PreConnect: false, + }}, + ReconcileServerPools: util.Pointer(true), + }, + }, + cl: nil, + pool: server.NewServerPool( + // redis servers + server.NewFakeServerWithFakeClient("127.0.0.1", "1000", redis_client.NewPredefinedRedisFakeResponse("role-master", nil)), + server.NewFakeServerWithFakeClient("127.0.0.1", "2000"), // is down + server.NewFakeServerWithFakeClient("127.0.0.1", "3000", + redis_client.NewPredefinedRedisFakeResponse("role-slave", nil), + redis_client.NewPredefinedRedisFakeResponse("slave-read-only-yes", nil), + ), + server.NewFakeServerWithFakeClient("127.0.0.1", "4000", + redis_client.NewPredefinedRedisFakeResponse("role-slave", nil), + redis_client.NewPredefinedRedisFakeResponse("slave-read-only-no", nil), + ), + server.NewFakeServerWithFakeClient("127.0.0.1", "5000", redis_client.NewPredefinedRedisFakeResponse("role-master", nil)), + server.NewFakeServerWithFakeClient("127.0.0.1", "6000", + redis_client.NewPredefinedRedisFakeResponse("role-slave", nil), + redis_client.NewPredefinedRedisFakeResponse("slave-read-only-yes", nil), + ), + // sentinel + server.NewFakeServerWithFakeClient("127.0.0.1", "26379", + redis_client.FakeResponse{ + // cmd: Ping + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelMasters() + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{"name", "shard0", "ip", "127.0.0.1", "port", "1000"}, + []interface{}{"name", "shard1", "ip", "127.0.0.1", "port", "5000"}, + } + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelMaster (shard0) + InjectResponse: func() interface{} { + return &redis_client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelSlaves (shard0) + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:2000", + "ip", "127.0.0.1", + "port", "2000", + "flags", "slave,s_down", // slave is down + }, + []interface{}{ + "name", "127.0.0.1:3000", + "ip", "127.0.0.1", + "port", "3000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelMaster (shard1) + InjectResponse: func() interface{} { + return &redis_client.SentinelMasterCmdResult{Name: "shard1", IP: "127.0.0.1", Port: 5000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelSlaves (shard1) + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:4000", + "ip", "127.0.0.1", + "port", "4000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:6000", + "ip", 
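// The "flags" fields in the fake sentinel replies in this test ("slave,s_down",
// "master,o_down") are what make discovery skip a server. A hedged sketch of the
// kind of check involved; the operator's real parsing lives in the sharded
// package and may differ in detail.
package main

import (
	"fmt"
	"strings"
)

// isDown reports whether a sentinel flags string marks a server as
// subjectively (s_down) or objectively (o_down) down.
func isDown(flags string) bool {
	for _, f := range strings.Split(flags, ",") {
		if f == "s_down" || f == "o_down" {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isDown("slave,s_down")) // true: excluded as a RW-slave candidate
	fmt.Println(isDown("master"))       // false
}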
"127.0.0.1", + "port", "6000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + ), + ), + log: logr.Discard(), + }, + want: Generator{ + BaseOptionsV2: generators.BaseOptionsV2{ + Component: "twemproxy", + InstanceName: "test", + Namespace: "test", + Labels: map[string]string{ + "app": component, + "part-of": "3scale-saas", + }}, + Spec: saasv1alpha1.TwemproxyConfigSpec{ + SentinelURIs: []string{"redis://127.0.0.1:26379"}, + ServerPools: []saasv1alpha1.TwemproxyServerPool{{ + Name: "test-pool", + Target: util.Pointer(saasv1alpha1.SlavesRW), + Topology: []saasv1alpha1.ShardedRedisTopology{ + {ShardName: "l-shard00", PhysicalShard: "shard0"}, + {ShardName: "l-shard01", PhysicalShard: "shard0"}, + {ShardName: "l-shard02", PhysicalShard: "shard0"}, + {ShardName: "l-shard03", PhysicalShard: "shard1"}, + {ShardName: "l-shard04", PhysicalShard: "shard1"}, + }, + BindAddress: "0.0.0.0:22121", + Timeout: 5000, + TCPBacklog: 512, + PreConnect: false, + }}, + ReconcileServerPools: util.Pointer(true), + }, + masterTargets: map[string]twemproxy.Server{ + "shard0": { + Address: "127.0.0.1:1000", + Priority: 1, + }, + "shard1": { + Address: "127.0.0.1:5000", + Priority: 1, + }, + }, + slaverwTargets: map[string]twemproxy.Server{ + "shard0": { + Address: "127.0.0.1:1000", + Priority: 1, + }, + "shard1": { + Address: "127.0.0.1:4000", + Priority: 1, + }, + }, + }, + wantErr: false, + }, + { + name: "Returns error (target rw slaves)", + args: args{ + ctx: context.TODO(), + instance: &saasv1alpha1.TwemproxyConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "test"}, + Spec: saasv1alpha1.TwemproxyConfigSpec{ + SentinelURIs: []string{"redis://127.0.0.1:26379"}, + ServerPools: []saasv1alpha1.TwemproxyServerPool{{ + Name: "test-pool", + Target: util.Pointer(saasv1alpha1.SlavesRW), + Topology: []saasv1alpha1.ShardedRedisTopology{ + {ShardName: "l-shard00", PhysicalShard: "shard0"}, + {ShardName: "l-shard01", PhysicalShard: "shard0"}, + {ShardName: "l-shard02", PhysicalShard: "shard0"}, + {ShardName: "l-shard03", PhysicalShard: "shard1"}, + {ShardName: "l-shard04", PhysicalShard: "shard1"}, + }, + BindAddress: "0.0.0.0:22121", + Timeout: 5000, + TCPBacklog: 512, + PreConnect: false, + }}, + ReconcileServerPools: util.Pointer(true), + }, + }, + cl: nil, + pool: server.NewServerPool( + // redis servers + server.NewFakeServerWithFakeClient("127.0.0.1", "1000"), // is down + server.NewFakeServerWithFakeClient("127.0.0.1", "4000", + redis_client.NewPredefinedRedisFakeResponse("role-slave", nil), + redis_client.NewPredefinedRedisFakeResponse("slave-read-only-no", nil), + ), + server.NewFakeServerWithFakeClient("127.0.0.1", "5000", redis_client.NewPredefinedRedisFakeResponse("role-master", nil)), + server.NewFakeServerWithFakeClient("127.0.0.1", "6000", + redis_client.NewPredefinedRedisFakeResponse("role-slave", nil), + redis_client.NewPredefinedRedisFakeResponse("slave-read-only-yes", nil), + ), + // sentinel + server.NewFakeServerWithFakeClient("127.0.0.1", "26379", + redis_client.FakeResponse{ + // cmd: Ping + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelMasters() + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{"name", "shard0", "ip", "127.0.0.1", "port", "1000"}, + []interface{}{"name", "shard1", "ip", "127.0.0.1", "port", "5000"}, + } + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: 
SentinelMaster (shard0) + InjectResponse: func() interface{} { + return &redis_client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master,o_down"} + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelMaster (shard1) + InjectResponse: func() interface{} { + return &redis_client.SentinelMasterCmdResult{Name: "shard1", IP: "127.0.0.1", Port: 5000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + redis_client.FakeResponse{ + // cmd: SentinelSlaves (shard1) + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:4000", + "ip", "127.0.0.1", + "port", "4000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:6000", + "ip", "127.0.0.1", + "port", "6000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + ), + ), + log: logr.Discard(), + }, + want: Generator{}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewGenerator(tt.args.ctx, tt.args.instance, tt.args.cl, tt.args.pool, tt.args.log) + if (err != nil) != tt.wantErr { + t.Errorf("NewGenerator() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(got, tt.want, cmp.AllowUnexported(Generator{}), cmpopts.IgnoreUnexported(twemproxy.Server{})); len(diff) != 0 { + t.Errorf("NewGenerator() = diff %v", diff) + } + }) + } +} diff --git a/pkg/redis/crud/client/fake_client.go b/pkg/redis/client/fake_client.go similarity index 65% rename from pkg/redis/crud/client/fake_client.go rename to pkg/redis/client/fake_client.go index 77c33271..7c95a09e 100644 --- a/pkg/redis/crud/client/fake_client.go +++ b/pkg/redis/client/fake_client.go @@ -7,13 +7,50 @@ import ( "github.com/go-redis/redis/v8" ) +type FakeResponse struct { + InjectResponse func() interface{} + InjectError func() error +} + +// Some predefined responses used in many tests +func NewPredefinedRedisFakeResponse(dictionary string, err error) FakeResponse { + var rsp []interface{} + + switch dictionary { + case "save": + rsp = []interface{}{"save", "900 1 300 10"} + case "no-save": + rsp = []interface{}{"save", ""} + case "slave-read-only-no": + rsp = []interface{}{"read-only", "no"} + case "slave-read-only-yes": + rsp = []interface{}{"read-only", "yes"} + case "role-slave": + rsp = []interface{}{"slave", "127.0.0.1:3333"} + case "role-master": + rsp = []interface{}{"master", ""} + default: + panic("response not defined") + } + + return FakeResponse{ + // canned reply for the selected dictionary entry + InjectResponse: func() interface{} { + return rsp + }, + InjectError: func() error { return err }, + } +} + type FakeClient struct { Responses []FakeResponse } -type FakeResponse struct { - InjectResponse func() interface{} - InjectError func() error +func NewFakeClient(responses ...FakeResponse) TestableInterface { + + return &FakeClient{ + Responses: responses, + } } func (fc *FakeClient) SentinelMaster(ctx context.Context, shard string) (*SentinelMasterCmdResult, error) { @@ -51,6 +88,16 @@ func (fc *FakeClient) SentinelInfoCache(ctx context.Context) (interface{}, error return rsp.InjectResponse(), rsp.InjectError() } +func (fc *FakeClient) SentinelPing(ctx context.Context) error { + rsp := fc.pop() + return rsp.InjectError() +} + +func (fc *FakeClient) SentinelDo(ctx context.Context, args ...interface{}) (interface{}, error) { + rsp := fc.pop() + return rsp.InjectResponse(), rsp.InjectError() +} + func
(fc *FakeClient) RedisRole(ctx context.Context) (interface{}, error) { rsp := fc.pop() return rsp.InjectResponse(), rsp.InjectError() @@ -83,6 +130,11 @@ func (fc *FakeClient) RedisDebugSleep(ctx context.Context, duration time.Duratio return nil } +func (fc *FakeClient) RedisDo(ctx context.Context, args ...interface{}) (interface{}, error) { + rsp := fc.pop() + return rsp.InjectResponse(), rsp.InjectError() +} + func (fc *FakeClient) pop() (fakeRsp FakeResponse) { fakeRsp, fc.Responses = fc.Responses[0], fc.Responses[1:] return fakeRsp diff --git a/pkg/redis/crud/client/goredis_client.go b/pkg/redis/client/goredis_client.go similarity index 82% rename from pkg/redis/crud/client/goredis_client.go rename to pkg/redis/client/goredis_client.go index eaf0121c..7ce62115 100644 --- a/pkg/redis/crud/client/goredis_client.go +++ b/pkg/redis/client/goredis_client.go @@ -23,12 +23,23 @@ func NewFromConnectionString(connectionString string) (*GoRedisClient, error) { return nil, err } + // don't keep idle connections open + opt.MinIdleConns = 0 + c.redis = redis.NewClient(opt) c.sentinel = redis.NewSentinelClient(opt) return c, nil } +func MustNewFromConnectionString(connectionString string) *GoRedisClient { + c, err := NewFromConnectionString(connectionString) + if err != nil { + panic(err) + } + return c +} + func NewFromOptions(opt *redis.Options) *GoRedisClient { return &GoRedisClient{ redis: redis.NewClient(opt), @@ -97,6 +108,16 @@ func (c *GoRedisClient) SentinelInfoCache(ctx context.Context) (interface{}, err return val, err } +func (c *GoRedisClient) SentinelPing(ctx context.Context) error { + _, err := c.sentinel.Ping(ctx).Result() + return err +} + +func (c *GoRedisClient) SentinelDo(ctx context.Context, args ...interface{}) (interface{}, error) { + val, err := c.redis.Do(ctx, args...).Result() + return val, err +} + func (c *GoRedisClient) RedisRole(ctx context.Context) (interface{}, error) { val, err := c.redis.Do(ctx, "role").Result() @@ -126,3 +147,8 @@ func (c *GoRedisClient) RedisDebugSleep(ctx context.Context, duration time.Durat _, err := c.redis.Do(ctx, "debug", "sleep", fmt.Sprintf("%.1f", duration.Seconds())).Result() return err } + +func (c *GoRedisClient) RedisDo(ctx context.Context, args ...interface{}) (interface{}, error) { + val, err := c.redis.Do(ctx, args...).Result() + return val, err +} diff --git a/pkg/redis/client/interface.go b/pkg/redis/client/interface.go new file mode 100644 index 00000000..12123674 --- /dev/null +++ b/pkg/redis/client/interface.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + "time" + + "github.com/go-redis/redis/v8" +) + +// TestableInterface is an interface that both the go-redis and the fake client implement. It is not intended to +// support client implementations other than go-redis; it exists only to allow injecting canned redis server +// responses through the FakeClient for testing purposes.
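// pop() above consumes Responses strictly first-in-first-out, so a test must
// enqueue exactly one FakeResponse per command, in the order the code under test
// will issue them. A usage sketch, assuming the renamed pkg/redis/client package
// from this diff is importable:
package main

import (
	"context"
	"fmt"

	redis_client "github.com/3scale/saas-operator/pkg/redis/client"
)

func main() {
	fc := redis_client.NewFakeClient(
		redis_client.NewPredefinedRedisFakeResponse("role-master", nil), // 1st command: RedisRole
		redis_client.NewPredefinedRedisFakeResponse("save", nil),        // 2nd command: RedisConfigGet("save")
	)
	role, _ := fc.RedisRole(context.TODO())
	fmt.Println(role) // [master ]
	cfg, _ := fc.RedisConfigGet(context.TODO(), "save")
	fmt.Println(cfg) // [save 900 1 300 10]
}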
+type TestableInterface interface { + SentinelMaster(context.Context, string) (*SentinelMasterCmdResult, error) + SentinelMasters(context.Context) ([]interface{}, error) + SentinelSlaves(context.Context, string) ([]interface{}, error) + SentinelMonitor(context.Context, string, string, string, int) error + SentinelSet(context.Context, string, string, string) error + SentinelPSubscribe(context.Context, ...string) (<-chan *redis.Message, func() error) + SentinelInfoCache(context.Context) (interface{}, error) + SentinelDo(context.Context, ...interface{}) (interface{}, error) + SentinelPing(ctx context.Context) error + RedisRole(context.Context) (interface{}, error) + RedisConfigGet(context.Context, string) ([]interface{}, error) + RedisConfigSet(context.Context, string, string) error + RedisSlaveOf(context.Context, string, string) error + RedisDebugSleep(context.Context, time.Duration) error + RedisDo(context.Context, ...interface{}) (interface{}, error) + Close() error +} + +// check that GoRedisClient implements the TestableInterface +var _ TestableInterface = &GoRedisClient{} + +// check that FakeClient implements the TestableInterface +var _ TestableInterface = &FakeClient{} diff --git a/pkg/redis/crud/client/types.go b/pkg/redis/client/types.go similarity index 100% rename from pkg/redis/crud/client/types.go rename to pkg/redis/client/types.go diff --git a/pkg/redis/crud/client/types_test.go b/pkg/redis/client/types_test.go similarity index 100% rename from pkg/redis/crud/client/types_test.go rename to pkg/redis/client/types_test.go diff --git a/pkg/redis/crud/crud.go b/pkg/redis/crud/crud.go deleted file mode 100644 index 3d41e0f8..00000000 --- a/pkg/redis/crud/crud.go +++ /dev/null @@ -1,243 +0,0 @@ -package crud - -import ( - "bufio" - "context" - "fmt" - "strings" - "time" - - "github.com/3scale/saas-operator/pkg/redis/crud/client" - "github.com/go-redis/redis/v8" -) - -type CRUD struct { - Client Client - IP string - Port string -} - -type Client interface { - SentinelMaster(context.Context, string) (*client.SentinelMasterCmdResult, error) - SentinelMasters(context.Context) ([]interface{}, error) - SentinelSlaves(context.Context, string) ([]interface{}, error) - SentinelMonitor(context.Context, string, string, string, int) error - SentinelSet(context.Context, string, string, string) error - SentinelPSubscribe(context.Context, ...string) (<-chan *redis.Message, func() error) - SentinelInfoCache(context.Context) (interface{}, error) - RedisRole(context.Context) (interface{}, error) - RedisConfigGet(context.Context, string) ([]interface{}, error) - RedisConfigSet(context.Context, string, string) error - RedisSlaveOf(context.Context, string, string) error - RedisDebugSleep(context.Context, time.Duration) error - Close() error -} - -// check that GoRedisClient implements Client interface -var _ Client = &client.GoRedisClient{} - -// check that FakeClient implements Client interface -var _ Client = &client.FakeClient{} - -func NewRedisCRUDFromConnectionString(connectionString string) (*CRUD, error) { - - opt, err := redis.ParseURL(connectionString) - if err != nil { - return nil, err - } - - parts := strings.Split(opt.Addr, ":") - if len(parts) != 2 { - return nil, fmt.Errorf("error parsing redis/sentinel address") - } - - return &CRUD{ - IP: parts[0], - Port: parts[1], - Client: client.NewFromOptions(opt), - }, nil -} - -func (crud *CRUD) CloseClient() error { - return crud.Client.Close() -} - -func NewFakeCRUD(responses ...client.FakeResponse) *CRUD { - - return &CRUD{ - IP: "fake-ip", - Port:
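// The two `var _ TestableInterface = ...` lines above are Go's zero-cost
// compile-time conformance checks. A minimal illustration of the idiom:
package main

import "fmt"

type Greeter interface{ Greet() string }

type English struct{}

func (English) Greet() string { return "hello" }

// Compilation fails if English ever stops satisfying Greeter; the blank
// identifier means nothing is allocated or kept at runtime.
var _ Greeter = English{}

func main() { fmt.Println(English{}.Greet()) }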
"fake-port", - Client: &client.FakeClient{Responses: responses}, - } -} - -func (crud *CRUD) GetIP() string { - return crud.IP -} - -func (sc *CRUD) GetPort() string { - return sc.Port -} - -func (crud *CRUD) SentinelMaster(ctx context.Context, shard string) (*client.SentinelMasterCmdResult, error) { - - result, err := crud.Client.SentinelMaster(ctx, shard) - if err != nil { - return nil, err - } - return result, nil -} - -func (crud *CRUD) SentinelMasters(ctx context.Context) ([]client.SentinelMasterCmdResult, error) { - - values, err := crud.Client.SentinelMasters(ctx) - if err != nil { - return nil, err - } - - result := make([]client.SentinelMasterCmdResult, len(values)) - for i, val := range values { - masterResult := &client.SentinelMasterCmdResult{} - err := sliceCmdToStruct(val, masterResult) - if err != nil { - return nil, err - } - result[i] = *masterResult - } - - return result, nil -} - -func (crud *CRUD) SentinelSlaves(ctx context.Context, shard string) ([]client.SentinelSlaveCmdResult, error) { - - values, err := crud.Client.SentinelSlaves(ctx, shard) - if err != nil { - return nil, err - } - - result := make([]client.SentinelSlaveCmdResult, len(values)) - for i, val := range values { - slaveResult := &client.SentinelSlaveCmdResult{} - err := sliceCmdToStruct(val, slaveResult) - if err != nil { - return nil, err - } - result[i] = *slaveResult - } - - return result, nil -} - -func (crud *CRUD) SentinelMonitor(ctx context.Context, name, host string, port string, quorum int) error { - return crud.Client.SentinelMonitor(ctx, name, host, port, quorum) -} - -func (crud *CRUD) SentinelSet(ctx context.Context, shard, parameter, value string) error { - return crud.Client.SentinelSet(ctx, shard, parameter, value) -} - -func (crud *CRUD) SentinelPSubscribe(ctx context.Context, events ...string) (<-chan *redis.Message, func() error) { - return crud.Client.SentinelPSubscribe(ctx, events...) 
-} - -func (crud *CRUD) SentinelInfoCache(ctx context.Context) (client.SentinelInfoCache, error) { - result := client.SentinelInfoCache{} - - raw, err := crud.Client.SentinelInfoCache(ctx) - mval := islice2imap(raw) - - for shard, servers := range mval { - result[shard] = make(map[string]client.RedisServerInfoCache, len(servers.([]interface{}))) - - for _, server := range servers.([]interface{}) { - // When sentinel is unable to reach the redis slave the info field can be nil - // so we have to check this to avoid panics - if server.([]interface{})[1] != nil { - info := infoStringToMap(server.([]interface{})[1].(string)) - result[shard][info["run_id"]] = client.RedisServerInfoCache{ - CacheAge: time.Duration(server.([]interface{})[0].(int64)) * time.Millisecond, - Info: info, - } - } - } - } - - return result, err -} - -func (crud *CRUD) RedisRole(ctx context.Context) (client.Role, string, error) { - val, err := crud.Client.RedisRole(ctx) - if err != nil { - return client.Unknown, "", err - } - - if client.Role(val.([]interface{})[0].(string)) == client.Master { - return client.Master, "", nil - } else { - return client.Slave, val.([]interface{})[1].(string), nil - } -} - -func (crud *CRUD) RedisConfigGet(ctx context.Context, parameter string) (string, error) { - val, err := crud.Client.RedisConfigGet(ctx, parameter) - if err != nil { - return "", err - } - return val[1].(string), nil -} - -func (crud *CRUD) RedisConfigSet(ctx context.Context, parameter, value string) error { - return crud.Client.RedisConfigSet(ctx, parameter, value) -} - -func (sc *CRUD) RedisSlaveOf(ctx context.Context, host, port string) error { - return sc.Client.RedisSlaveOf(ctx, host, port) -} - -func (crud *CRUD) RedisDebugSleep(ctx context.Context, duration time.Duration) error { - return crud.Client.RedisDebugSleep(ctx, duration) -} - -// This is a horrible function to parse the horrible structs that the redis-go -// client returns for administrative commands. I swear it's not my fault ... 
-func sliceCmdToStruct(in interface{}, out interface{}) error { - m := map[string]string{} - for i := range in.([]interface{}) { - if i%2 != 0 { - continue - } - m[in.([]interface{})[i].(string)] = in.([]interface{})[i+1].(string) - } - - err := redis.NewStringStringMapResult(m, nil).Scan(out) - if err != nil { - return err - } - return nil -} - -func islice2imap(in interface{}) map[string]interface{} { - m := map[string]interface{}{} - for i := range in.([]interface{}) { - if i%2 != 0 { - continue - } - m[in.([]interface{})[i].(string)] = in.([]interface{})[i+1].([]interface{}) - } - return m -} - -func infoStringToMap(in string) map[string]string { - - m := map[string]string{} - scanner := bufio.NewScanner(strings.NewReader(in)) - for scanner.Scan() { - // do not add empty lines or section headings (see the test for more info) - if line := scanner.Text(); line != "" && !strings.HasPrefix(line, "# ") { - kv := strings.SplitN(line, ":", 2) - m[kv[0]] = kv[1] - } - } - - return m -} diff --git a/pkg/redis/events/watcher.go b/pkg/redis/events/watcher.go index 79f227c4..f1d490db 100644 --- a/pkg/redis/events/watcher.go +++ b/pkg/redis/events/watcher.go @@ -5,7 +5,8 @@ import ( "fmt" "github.com/3scale/saas-operator/pkg/reconcilers/threads" - "github.com/3scale/saas-operator/pkg/redis" + redis "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/3scale/saas-operator/pkg/redis/sharded" "github.com/go-logr/logr" "github.com/prometheus/client_golang/prometheus" "sigs.k8s.io/controller-runtime/pkg/client" @@ -75,18 +76,34 @@ func init() { var _ threads.RunnableThread = &SentinelEventWatcher{} type SentinelEventWatcher struct { - Instance client.Object - SentinelURI string - ExportMetrics bool - Topology *redis.ShardedCluster + instance client.Object + sentinelURI string + exportMetrics bool + topology *sharded.Cluster eventsCh chan event.GenericEvent started bool cancel context.CancelFunc - sentinel *redis.SentinelServer + sentinel *sharded.SentinelServer +} + +func NewSentinelEventWatcher(sentinelURI string, instance client.Object, topology *sharded.Cluster, + metrics bool, pool *redis.ServerPool) (*SentinelEventWatcher, error) { + sentinel, err := sharded.NewSentinelServerFromPool(sentinelURI, nil, pool) + if err != nil { + return nil, err + } + + return &SentinelEventWatcher{ + instance: instance, + sentinelURI: sentinelURI, + exportMetrics: metrics, + topology: topology, + sentinel: sentinel, + }, nil } func (sew *SentinelEventWatcher) GetID() string { - return sew.SentinelURI + return sew.sentinelURI } // IsStarted returns whether the metrics gatherer is running or not @@ -99,34 +116,27 @@ func (sew *SentinelEventWatcher) SetChannel(ch chan event.GenericEvent) { } func (sew *SentinelEventWatcher) Cleanup() error { - return sew.sentinel.CRUD.CloseClient() + return sew.sentinel.CloseClient() } // Start starts metrics gatherer for sentinel func (sew *SentinelEventWatcher) Start(parentCtx context.Context, l logr.Logger) error { - log := l.WithValues("sentinel", sew.SentinelURI) + log := l.WithValues("sentinel", sew.sentinelURI) if sew.started { log.Info("the event watcher is already running") return nil } - if sew.ExportMetrics { + if sew.exportMetrics { // Initializes metrics with 0 value sew.initCounters() } - var err error - sew.sentinel, err = redis.NewSentinelServerFromConnectionString(sew.SentinelURI, sew.SentinelURI) - if err != nil { - log.Error(err, "cannot create SentinelServer") - return err - } - go func() { var ctx context.Context ctx, sew.cancel = 
context.WithCancel(parentCtx) - ch, closeWatch := sew.sentinel.CRUD.SentinelPSubscribe(ctx, + ch, closeWatch := sew.sentinel.SentinelPSubscribe(ctx, `+switch-master`, `-failover-abort-no-good-slave`, `[+\-]sdown`, @@ -140,7 +150,7 @@ func (sew *SentinelEventWatcher) Start(parentCtx context.Context, l logr.Logger) case msg := <-ch: log.V(1).Info("received event from sentinel", "event", msg.String()) - sew.eventsCh <- event.GenericEvent{Object: sew.Instance} + sew.eventsCh <- event.GenericEvent{Object: sew.instance} rem, err := NewRedisEventMessage(msg) if err == nil { log.V(3).Info("redis event message parsed", @@ -150,7 +160,7 @@ func (sew *SentinelEventWatcher) Start(parentCtx context.Context, l logr.Logger) "master-type", rem.master.role, "master-name", rem.master.name, "master-ip", rem.master.ip, "master-port", rem.master.port, ) - if sew.ExportMetrics { + if sew.exportMetrics { sew.metricsFromEvent(rem) } } else { @@ -159,7 +169,6 @@ func (sew *SentinelEventWatcher) Start(parentCtx context.Context, l logr.Logger) case <-ctx.Done(): log.Info("shutting down event watcher") - sew.sentinel.Cleanup(log) sew.started = false return } @@ -180,14 +189,14 @@ func (sew *SentinelEventWatcher) metricsFromEvent(rem RedisEventMessage) { case "+switch-master": switchMasterCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": rem.master.name, + "sentinel": sew.sentinelURI, "shard": rem.master.name, "redis_server": fmt.Sprintf("%s:%s", rem.master.ip, rem.master.port), }, ).Add(1) case "-failover-abort-no-good-slave": failoverAbortNoGoodSlaveCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": rem.target.name, + "sentinel": sew.sentinelURI, "shard": rem.target.name, }, ).Add(1) case "+sdown": @@ -195,14 +204,14 @@ func (sew *SentinelEventWatcher) metricsFromEvent(rem RedisEventMessage) { case "sentinel": sdownSentinelCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": rem.master.name, + "sentinel": sew.sentinelURI, "shard": rem.master.name, "redis_server": fmt.Sprintf("%s:%s", rem.target.ip, rem.target.port), }, ).Add(1) default: sdownCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": rem.master.name, + "sentinel": sew.sentinelURI, "shard": rem.master.name, "redis_server": fmt.Sprintf("%s:%s", rem.target.ip, rem.target.port), }, ).Add(1) @@ -212,14 +221,14 @@ func (sew *SentinelEventWatcher) metricsFromEvent(rem RedisEventMessage) { case "sentinel": sdownClearedSentinelCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": rem.master.name, + "sentinel": sew.sentinelURI, "shard": rem.master.name, "redis_server": fmt.Sprintf("%s:%s", rem.target.ip, rem.target.port), }, ).Add(1) default: sdownClearedCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": rem.master.name, + "sentinel": sew.sentinelURI, "shard": rem.master.name, "redis_server": fmt.Sprintf("%s:%s", rem.target.ip, rem.target.port), }, ).Add(1) @@ -228,44 +237,44 @@ } func (sew *SentinelEventWatcher) initCounters() { - if sew.Topology != nil { + if sew.topology != nil { - for _, shard := range *sew.Topology { + for _, shard := range sew.topology.Shards { failoverAbortNoGoodSlaveCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": shard.Name, + "sentinel": sew.sentinelURI, "shard": shard.Name, }, ).Add(0) for _, server := range shard.Servers { switchMasterCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": shard.Name, - 
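// A self-contained sketch of the subscription Start() opens above, talking to a
// sentinel with go-redis v8 directly. The patterns are the same sentinel events
// the watcher reacts to; the address and the shutdown handling are simplified
// assumptions for illustration.
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	sentinel := redis.NewSentinelClient(&redis.Options{Addr: "127.0.0.1:26379"})
	pubsub := sentinel.PSubscribe(ctx,
		"+switch-master",                // a failover promoted a new master
		"-failover-abort-no-good-slave", // failover found no promotable slave
		"[+\\-]sdown",                   // a server entered or left subjectively-down state
	)
	defer pubsub.Close()

	for msg := range pubsub.Channel() {
		fmt.Printf("event=%s payload=%s\n", msg.Channel, msg.Payload)
	}
}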
"redis_server": fmt.Sprintf("%s:%s", server.CRUD.IP, server.CRUD.Port), + "sentinel": sew.sentinelURI, "shard": shard.Name, + "redis_server": server.ID(), }, ).Add(0) sdownSentinelCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": shard.Name, - "redis_server": fmt.Sprintf("%s:%s", server.CRUD.IP, server.CRUD.Port), + "sentinel": sew.sentinelURI, "shard": shard.Name, + "redis_server": server.ID(), }, ).Add(0) sdownCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": shard.Name, - "redis_server": fmt.Sprintf("%s:%s", server.CRUD.IP, server.CRUD.Port), + "sentinel": sew.sentinelURI, "shard": shard.Name, + "redis_server": server.ID(), }, ).Add(0) sdownClearedSentinelCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": shard.Name, - "redis_server": fmt.Sprintf("%s:%s", server.CRUD.IP, server.CRUD.Port), + "sentinel": sew.sentinelURI, "shard": shard.Name, + "redis_server": server.ID(), }, ).Add(0) sdownClearedCount.With( prometheus.Labels{ - "sentinel": sew.SentinelURI, "shard": shard.Name, - "redis_server": fmt.Sprintf("%s:%s", server.CRUD.IP, server.CRUD.Port), + "sentinel": sew.sentinelURI, "shard": shard.Name, + "redis_server": server.ID(), }, ).Add(0) } diff --git a/pkg/redis/metrics/sentinel_metrics.go b/pkg/redis/metrics/sentinel_metrics.go index 9cb91887..e32c0817 100644 --- a/pkg/redis/metrics/sentinel_metrics.go +++ b/pkg/redis/metrics/sentinel_metrics.go @@ -6,8 +6,9 @@ import ( "time" "github.com/3scale/saas-operator/pkg/reconcilers/threads" - "github.com/3scale/saas-operator/pkg/redis" - "github.com/3scale/saas-operator/pkg/redis/crud/client" + "github.com/3scale/saas-operator/pkg/redis/client" + redis "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/3scale/saas-operator/pkg/redis/sharded" "github.com/go-logr/logr" "github.com/prometheus/client_golang/prometheus" "sigs.k8s.io/controller-runtime/pkg/event" @@ -96,15 +97,28 @@ var _ threads.RunnableThread = &SentinelMetricsGatherer{} // SentinelMetricsGatherer is used to export sentinel metrics, obtained // thrugh several admin commands, as prometheus metrics type SentinelMetricsGatherer struct { - RefreshInterval time.Duration - SentinelURI string + refreshInterval time.Duration + sentinelURI string + sentinel *sharded.SentinelServer started bool cancel context.CancelFunc - sentinel *redis.SentinelServer +} + +func NewSentinelMetricsGatherer(sentinelURI string, refreshInterval time.Duration, pool *redis.ServerPool) (*SentinelMetricsGatherer, error) { + sentinel, err := sharded.NewSentinelServerFromPool(sentinelURI, nil, pool) + if err != nil { + return nil, err + } + + return &SentinelMetricsGatherer{ + refreshInterval: refreshInterval, + sentinelURI: sentinelURI, + sentinel: sentinel, + }, nil } func (fw *SentinelMetricsGatherer) GetID() string { - return fw.SentinelURI + return fw.sentinelURI } // IsStarted returns whether the metrics gatherer is running or not @@ -118,24 +132,17 @@ func (fw *SentinelMetricsGatherer) SetChannel(ch chan event.GenericEvent) {} // Start starts metrics gatherer for sentinel func (smg *SentinelMetricsGatherer) Start(parentCtx context.Context, l logr.Logger) error { - log := l.WithValues("sentinel", smg.SentinelURI) + log := l.WithValues("sentinel", smg.sentinelURI) if smg.started { log.Info("the metrics gatherer is already running") return nil } - var err error - smg.sentinel, err = redis.NewSentinelServerFromConnectionString(smg.SentinelURI, smg.SentinelURI) - if err != nil { - log.Error(err, "cannot create SentinelServer") - 
return err - } - go func() { var ctx context.Context ctx, smg.cancel = context.WithCancel(parentCtx) - ticker := time.NewTicker(smg.RefreshInterval) + ticker := time.NewTicker(smg.refreshInterval) log.Info("sentinel metrics gatherer running") @@ -149,7 +156,6 @@ func (smg *SentinelMetricsGatherer) Start(parentCtx context.Context, l logr.Logg case <-ctx.Done(): log.Info("shutting down sentinel metrics gatherer") - smg.sentinel.Cleanup(log) smg.started = false return } @@ -178,38 +184,38 @@ func (smg *SentinelMetricsGatherer) Stop() { func (smg *SentinelMetricsGatherer) gatherMetrics(ctx context.Context) error { - mresult, err := smg.sentinel.CRUD.SentinelMasters(ctx) + mresult, err := smg.sentinel.SentinelMasters(ctx) if err != nil { return err } for _, master := range mresult { - serverInfo.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + serverInfo.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", master.IP, master.Port), "role": master.RoleReported, }).Set(float64(1)) - linkPendingCommands.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + linkPendingCommands.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", master.IP, master.Port), "role": master.RoleReported, }).Set(float64(master.LinkPendingCommands)) - lastOkPingReply.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + lastOkPingReply.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", master.IP, master.Port), "role": master.RoleReported, }).Set(float64(master.LastOkPingReply)) - roleReportedTime.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + roleReportedTime.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", master.IP, master.Port), "role": master.RoleReported, }).Set(float64(master.RoleReportedTime)) - numSlaves.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + numSlaves.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", master.IP, master.Port), "role": master.RoleReported, }).Set(float64(master.NumSlaves)) - numOtherSentinels.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + numOtherSentinels.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", master.IP, master.Port), "role": master.RoleReported, }).Set(float64(master.NumOtherSentinels)) - sresult, err := smg.sentinel.CRUD.SentinelSlaves(ctx, master.Name) + sresult, err := smg.sentinel.SentinelSlaves(ctx, master.Name) if err != nil { return err } @@ -217,7 +223,7 @@ func (smg *SentinelMetricsGatherer) gatherMetrics(ctx context.Context) error { // Cleanup any vector that corresponds to the same server but with a // different role to avoid stale metrics after a role switch cleanupMetrics(prometheus.Labels{ - "sentinel": smg.SentinelURI, + "sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", master.IP, master.Port), "role": string(client.Slave), @@ -225,32 +231,32 @@ func (smg *SentinelMetricsGatherer) gatherMetrics(ctx context.Context) error { for _, slave := range sresult { - serverInfo.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + serverInfo.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": 
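// A minimal sketch of the stale-series problem the cleanupMetrics call above
// addresses: after a failover the same redis_server reappears under a different
// role label, and unless the old label set is deleted the gauge keeps exporting
// the pre-failover series. The metric name here is illustrative only.
package main

import "github.com/prometheus/client_golang/prometheus"

var info = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{Name: "example_redis_server_info", Help: "example server info"},
	[]string{"shard", "redis_server", "role"},
)

func main() {
	prometheus.MustRegister(info)

	// Before the failover the server reports as master.
	info.With(prometheus.Labels{"shard": "shard0", "redis_server": "127.0.0.1:1000", "role": "master"}).Set(1)

	// After the failover it is a slave: publish the new series and delete the
	// old label set so both roles are never exported at once.
	info.With(prometheus.Labels{"shard": "shard0", "redis_server": "127.0.0.1:1000", "role": "slave"}).Set(1)
	info.Delete(prometheus.Labels{"shard": "shard0", "redis_server": "127.0.0.1:1000", "role": "master"})
}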
master.Name, "redis_server": fmt.Sprintf("%s:%d", slave.IP, slave.Port), "role": slave.RoleReported, }).Set(float64(1)) - linkPendingCommands.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + linkPendingCommands.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", slave.IP, slave.Port), "role": slave.RoleReported, }).Set(float64(slave.LinkPendingCommands)) - lastOkPingReply.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + lastOkPingReply.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", slave.IP, slave.Port), "role": slave.RoleReported, }).Set(float64(slave.LastOkPingReply)) - roleReportedTime.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + roleReportedTime.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", slave.IP, slave.Port), "role": slave.RoleReported, }).Set(float64(slave.RoleReportedTime)) - masterLinkDownTime.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + masterLinkDownTime.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", slave.IP, slave.Port), "role": slave.RoleReported, }).Set(float64(slave.MasterLinkDownTime)) - slaveReplOffset.With(prometheus.Labels{"sentinel": smg.SentinelURI, "shard": master.Name, + slaveReplOffset.With(prometheus.Labels{"sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", slave.IP, slave.Port), "role": slave.RoleReported, }).Set(float64(slave.SlaveReplOffset)) cleanupMetrics(prometheus.Labels{ - "sentinel": smg.SentinelURI, + "sentinel": smg.sentinelURI, "shard": master.Name, "redis_server": fmt.Sprintf("%s:%d", slave.IP, slave.Port), "role": string(client.Master), diff --git a/pkg/redis/redis_server.go b/pkg/redis/redis_server.go deleted file mode 100644 index 956facc4..00000000 --- a/pkg/redis/redis_server.go +++ /dev/null @@ -1,78 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "net" - - "github.com/3scale/saas-operator/pkg/redis/crud" - "github.com/3scale/saas-operator/pkg/redis/crud/client" - "github.com/3scale/saas-operator/pkg/util" - "github.com/go-logr/logr" -) - -// RedisServer represent a redis server and its characteristics -type RedisServer struct { - Name string - Host string - Port string - Role client.Role - CRUD *crud.CRUD -} - -func (rs *RedisServer) IP() (string, error) { - var ip string - if r := net.ParseIP(rs.Host); r != nil { - ip = r.String() - } else { - // if it is not an IP, try to resolve a DNS - ips, err := net.LookupIP(rs.Host) - if err != nil { - return "", err - } - if len(ips) > 1 { - return "", fmt.Errorf("dns resolves to more than 1 IP") - } - ip = ips[0].String() - } - - return ip, nil -} - -func NewRedisServerFromConnectionString(name, connectionString string) (*RedisServer, error) { - - crud, err := crud.NewRedisCRUDFromConnectionString(connectionString) - if err != nil { - return nil, err - } - - return &RedisServer{Name: name, Host: crud.GetIP(), Port: crud.GetPort(), CRUD: crud, Role: client.Unknown}, nil -} - -// Cleanup closes all Redis clients opened during the RedisServer object creation -func (srv *RedisServer) Cleanup(log logr.Logger) error { - log.V(2).Info("[@redis-server-cleanup] closing client", - "server", srv.Name, "host", srv.Host, - ) - if err := srv.CRUD.CloseClient(); err != nil { - log.Error(err, 
"[@redis-server-cleanup] error closing server client", - "server", srv.Name, "host", srv.Host, - ) - return err - } - return nil -} - -// Discover returns the Role for a given -// redis Server -func (srv *RedisServer) Discover(ctx context.Context) error { - - role, _, err := srv.CRUD.RedisRole(ctx) - if err != nil { - srv.Role = client.Unknown - return util.WrapError("redis-autodiscovery", err) - } - srv.Role = role - - return nil -} diff --git a/pkg/redis/redis_server_test.go b/pkg/redis/redis_server_test.go deleted file mode 100644 index d918f0e8..00000000 --- a/pkg/redis/redis_server_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package redis - -import ( - "context" - "errors" - "testing" - - "github.com/3scale/saas-operator/pkg/redis/crud" - "github.com/3scale/saas-operator/pkg/redis/crud/client" - "github.com/go-test/deep" -) - -func TestNewRedisServerFromConnectionString(t *testing.T) { - type args struct { - name string - connectionString string - } - tests := []struct { - name string - args args - want *RedisServer - wantErr bool - }{ - { - name: "Returns a new RedisServer object for the given connection string", - args: args{ - name: "test", - connectionString: "redis://127.0.0.1:3333", - }, - want: &RedisServer{ - Name: "test", - Host: "127.0.0.1", - Port: "3333", - Role: client.Unknown, - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:3333"); return c }(), - }, - wantErr: false, - }, - { - name: "Returns error", - args: args{ - name: "test", - connectionString: "127.0.0.1:3333", - }, - want: nil, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := NewRedisServerFromConnectionString(tt.args.name, tt.args.connectionString) - if (err != nil) != tt.wantErr { - t.Errorf("NewRedisServerFromConnectionString() error = %v, wantErr %v", err, tt.wantErr) - return - } - if diff := deep.Equal(got, tt.want); len(diff) > 0 { - t.Errorf("NewSentinelServer() got diff: %v", diff) - } - }) - } -} - -func TestRedisServer_Discover(t *testing.T) { - type fields struct { - Name string - IP string - Port string - Role client.Role - ReadOnly bool - CRUD *crud.CRUD - } - type args struct { - ctx context.Context - } - tests := []struct { - name string - fields fields - args args - wantRole client.Role - wantReadOnly bool - wantErr bool - }{ - { - name: "Discovers characteristics of the redis server: master/rw", - fields: fields{ - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"master", ""} - }, - InjectError: func() error { return nil }, - }, - ), - }, - args: args{ctx: context.TODO()}, - wantRole: client.Master, - wantReadOnly: false, - wantErr: false, - }, - { - name: "Discovers characteristics of the redis server: slave/ro", - fields: fields{ - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "127.0.0.1:3333"} - }, - InjectError: func() error { return nil }, - }, - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"read-only", "yes"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - args: args{ctx: context.TODO()}, - wantRole: client.Slave, - wantReadOnly: true, - wantErr: false, - }, - { - name: "Discovers characteristics of the redis server: slave/rw", - fields: fields{ - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "127.0.0.1:3333"} - }, - InjectError: 
func() error { return nil }, - }, - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"read-only", "no"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - args: args{ctx: context.TODO()}, - wantRole: client.Slave, - wantReadOnly: false, - wantErr: false, - }, - { - name: "'role' command fails, returns an error", - fields: fields{ - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { return []interface{}{} }, - InjectError: func() error { return errors.New("error") }, - }, - ), - }, - args: args{ctx: context.TODO()}, - wantRole: client.Unknown, - wantReadOnly: false, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - srv := &RedisServer{ - Name: tt.fields.Name, - Host: tt.fields.IP, - Port: tt.fields.Port, - Role: tt.fields.Role, - CRUD: tt.fields.CRUD, - } - if err := srv.Discover(tt.args.ctx); (err != nil) != tt.wantErr { - t.Errorf("RedisServer.Discover() error = %v, wantErr %v", err, tt.wantErr) - return - } - if tt.wantRole != srv.Role { - t.Errorf("RedisServer.Discover() got = %v, want %v", srv.Role, tt.wantRole) - } - }) - } -} diff --git a/pkg/redis/redis_shard.go b/pkg/redis/redis_shard.go deleted file mode 100644 index 89065d37..00000000 --- a/pkg/redis/redis_shard.go +++ /dev/null @@ -1,188 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "sort" - - "github.com/3scale/saas-operator/pkg/redis/crud/client" - "github.com/3scale/saas-operator/pkg/util" - "github.com/go-logr/logr" -) - -// Shard is a list of the redis Server objects that compose a redis shard -type Shard struct { - Name string - Servers []RedisServer -} - -// NewShard returns a Shard object given the passed redis server URLs -func NewShard(name string, connectionStrings []string) (*Shard, error) { - shard := &Shard{Name: name} - servers := make([]RedisServer, len(connectionStrings)) - for i, cs := range connectionStrings { - rs, err := NewRedisServerFromConnectionString(cs, cs) - if err != nil { - return shard, err - } - servers[i] = *rs - } - - shard.Servers = servers - return shard, nil -} - -// Discover retrieves the role and read-only flag for all the servers in the shard -func (s *Shard) Discover(ctx context.Context, log logr.Logger) error { - - for idx := range s.Servers { - if err := s.Servers[idx].Discover(ctx); err != nil { - return err - } - } - - masters := 0 - for _, server := range s.Servers { - if server.Role == client.Master { - masters++ - } - } - - if masters != 1 { - err := fmt.Errorf("[redis-autodiscovery/Shard.Discover] expected 1 master but got %d", masters) - log.Error(err, "error discovering shard server roles") - return err - } - - return nil -} - -// GetMasterAddr returns the URL of the master server in a shard or error if zero -// or more than one master is found -func (s *Shard) GetMasterAddr() (string, string, error) { - for _, srv := range s.Servers { - if srv.Role == client.Master { - ip, err := srv.IP() - if err != nil { - return "", "", util.WrapError("redis-autodiscovery/Shard.GetMasterAddr", err) - } - return ip, srv.Port, nil - } - } - return "", "", fmt.Errorf("[redis-autodiscovery/Shard.GetMasterAddr] master not found") -} - -// Init initializes this shard if not already initialized -func (s *Shard) Init(ctx context.Context, masterIndex int32, log logr.Logger) ([]string, error) { - changed := []string{} - - for idx, srv := range s.Servers { - role, slaveof, err := srv.CRUD.RedisRole(ctx) - if err != nil { - return changed, err - } - - if role == 
client.Slave { - - if slaveof == "127.0.0.1" { - - if idx == int(masterIndex) { - if err := srv.CRUD.RedisSlaveOf(ctx, "NO", "ONE"); err != nil { - return changed, err - } - log.Info(fmt.Sprintf("[@redis-setup] Configured %s as master", srv.Name)) - changed = append(changed, srv.Name) - } else { - if err := srv.CRUD.RedisSlaveOf(ctx, s.Servers[masterIndex].Host, s.Servers[masterIndex].Port); err != nil { - return changed, err - } - log.Info(fmt.Sprintf("[@redis-setup] Configured %s as slave", srv.Name)) - changed = append(changed, srv.Name) - } - - } else { - s.Servers[idx].Role = client.Slave - } - - } else if role == client.Master { - s.Servers[idx].Role = client.Master - } else { - return changed, fmt.Errorf("[@redis-setup] unable to get role for server %s", srv.Name) - } - } - - return changed, nil -} - -// Cleanup closes all Redis clients opened during the Shard object creation -func (s *Shard) Cleanup(log logr.Logger) []error { - log.V(1).Info("[@redis-shard-cleanup] closing redis shard clients", - "shard", s.Name, - ) - var closeErrors []error - for _, server := range s.Servers { - if err := server.Cleanup(log); err != nil { - closeErrors = append(closeErrors, err) - } - } - - return closeErrors -} - -// ShardedCluster represents a sharded redis cluster, composed by several Shards -type ShardedCluster []Shard - -// NewShardedCluster returns a new ShardedCluster given the shard structure passed as a map[string][]string -func NewShardedCluster(ctx context.Context, serverList map[string][]string, log logr.Logger) (ShardedCluster, error) { - - sc := make([]Shard, 0, len(serverList)) - - for shardName, shardServers := range serverList { - - shard, err := NewShard(shardName, shardServers) - if err != nil { - return nil, err - } - sc = append(sc, *shard) - } - - return sc, nil -} - -// Cleanup closes all Redis clients opened during the ShardedCluster object creation -func (sc ShardedCluster) Cleanup(log logr.Logger) []error { - var cleanupErrors []error - for _, shard := range sc { - if err := shard.Cleanup(log); err != nil { - cleanupErrors = append(cleanupErrors, err...) 
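// --- Editor's aside (illustrative sketch, not part of the diff) ---
// Shard.Discover() above treats a shard as healthy only when exactly one
// server reports the master role. That invariant, reduced to a standalone
// helper over a slice of roles (Role/Master/Slave are stand-ins for the
// deleted client.Role constants):
package main

import "fmt"

type Role string

const (
	Master Role = "master"
	Slave  Role = "slave"
)

// validateSingleMaster returns an error unless exactly one role is Master.
func validateSingleMaster(roles []Role) error {
	masters := 0
	for _, r := range roles {
		if r == Master {
			masters++
		}
	}
	if masters != 1 {
		return fmt.Errorf("expected 1 master but got %d", masters)
	}
	return nil
}

func main() {
	fmt.Println(validateSingleMaster([]Role{Master, Slave, Slave})) // <nil>
	fmt.Println(validateSingleMaster([]Role{Slave, Slave}))         // error
}
// --- end aside ---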
- } - } - return cleanupErrors -} - -func (sc ShardedCluster) Discover(ctx context.Context, log logr.Logger) error { - for _, shard := range sc { - if err := shard.Discover(ctx, log); err != nil { - return err - } - } - return nil -} - -func (sc ShardedCluster) GetShardNames() []string { - shards := make([]string, len(sc)) - for i, shard := range sc { - shards[i] = shard.Name - } - sort.Strings(shards) - return shards -} - -func (sc ShardedCluster) GetShardByName(name string) *Shard { - for _, shard := range sc { - if shard.Name == name { - return &shard - } - } - return nil -} diff --git a/pkg/redis/redis_shard_test.go b/pkg/redis/redis_shard_test.go deleted file mode 100644 index ba44a2a3..00000000 --- a/pkg/redis/redis_shard_test.go +++ /dev/null @@ -1,799 +0,0 @@ -package redis - -import ( - "context" - "errors" - "reflect" - "sort" - "testing" - - "github.com/3scale/saas-operator/pkg/redis/crud" - "github.com/3scale/saas-operator/pkg/redis/crud/client" - "github.com/go-logr/logr" - "github.com/go-test/deep" -) - -func TestNewShard(t *testing.T) { - type args struct { - name string - connectionStrings []string - } - tests := []struct { - name string - args args - want *Shard - wantErr bool - }{ - { - name: "Returns a new Shard object", - args: args{ - name: "test", - connectionStrings: []string{"redis://127.0.0.1:1000", "redis://127.0.0.1:2000", "redis://127.0.0.1:3000"}, - }, - want: &Shard{ - Name: "test", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:1000", - Host: "127.0.0.1", - Port: "1000", - Role: client.Unknown, - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:1000"); return c }(), - }, - { - Name: "redis://127.0.0.1:2000", - Host: "127.0.0.1", - Port: "2000", - Role: client.Unknown, - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:2000"); return c }(), - }, - { - Name: "redis://127.0.0.1:3000", - Host: "127.0.0.1", - Port: "3000", - Role: client.Unknown, - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:3000"); return c }(), - }, - }, - }, - wantErr: false, - }, - { - name: "Returns an error (bad connection string)", - args: args{ - name: "test", - connectionStrings: []string{"redis://127.0.0.1:1000", "127.0.0.1:2000", "redis://127.0.0.1:3000"}, - }, - want: &Shard{ - Name: "test", - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := NewShard(tt.args.name, tt.args.connectionStrings) - if (err != nil) != tt.wantErr { - t.Errorf("NewShard() error = %v, wantErr %v", err, tt.wantErr) - return - } - if diff := deep.Equal(got, tt.want); len(diff) > 0 { - t.Errorf("NewShard() got diff: %v", diff) - } - }) - } -} - -func TestShard_Discover(t *testing.T) { - type fields struct { - Name string - Servers []RedisServer - } - type args struct { - ctx context.Context - log logr.Logger - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "Discovers roles for all servers in the shard", - fields: fields{ - Name: "test", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:1000", - Host: "127.0.0.1", - Port: "1000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"master"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - { - Name: "redis://127.0.0.1:2000", - Host: "127.0.0.1", - Port: "2000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - 
client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "127.0.0.1"} - }, - InjectError: func() error { return nil }, - }, - )}, - { - Name: "redis://127.0.0.1:3000", - Host: "127.0.0.1", - Port: "3000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "127.0.0.1"} - }, - InjectError: func() error { return nil }, - }, - )}, - }, - }, - args: args{ctx: context.TODO(), log: logr.Discard()}, - wantErr: false, - }, - { - name: "second server fails, returns error", - fields: fields{ - Name: "test", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:1000", - Host: "127.0.0.1", - Port: "1000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"master"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - { - Name: "redis://127.0.0.1:2000", - Host: "127.0.0.1", - Port: "2000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{} - }, - InjectError: func() error { return errors.New("error") }, - }, - )}, - { - Name: "redis://127.0.0.1:3000", - Host: "127.0.0.1", - Port: "3000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "127.0.0.1"} - }, - InjectError: func() error { return nil }, - }, - )}, - }, - }, - args: args{ctx: context.TODO(), log: logr.Discard()}, - wantErr: true, - }, - { - name: "no master, returns error", - fields: fields{ - Name: "test", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:1000", - Host: "127.0.0.1", - Port: "1000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "no one"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - { - Name: "redis://127.0.0.1:2000", - Host: "127.0.0.1", - Port: "2000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "no one"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - { - Name: "redis://127.0.0.1:3000", - Host: "127.0.0.1", - Port: "3000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "no one"} - }, - InjectError: func() error { return nil }, - }, - )}, - }, - }, - args: args{ctx: context.TODO(), log: logr.Discard()}, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Shard{ - Name: tt.fields.Name, - Servers: tt.fields.Servers, - } - if err := s.Discover(tt.args.ctx, tt.args.log); (err != nil) != tt.wantErr { - t.Errorf("Shard.Discover() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestShard_Init(t *testing.T) { - type fields struct { - Name string - Servers []RedisServer - } - type args struct { - ctx context.Context - masterIndex int32 - log logr.Logger - } - tests := []struct { - name string - fields fields - args args - want []string - wantErr bool - }{ - { - name: "All redis servers configured", - fields: fields{ - Name: "test", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:1000", - Host: "127.0.0.1", - Port: "1000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() 
interface{} { - return []interface{}{"slave", "127.0.0.1"} - }, - InjectError: func() error { return nil }, - }, - client.FakeResponse{ - InjectResponse: func() interface{} { return nil }, - InjectError: func() error { return nil }, - }, - ), - }, - { - Name: "redis://127.0.0.1:2000", - Host: "127.0.0.1", - Port: "2000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "127.0.0.1"} - }, - InjectError: func() error { return nil }, - }, - client.FakeResponse{ - InjectResponse: func() interface{} { return nil }, - InjectError: func() error { return nil }, - }, - )}, - { - Name: "redis://127.0.0.1:3000", - Host: "127.0.0.1", - Port: "3000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "127.0.0.1"} - }, - InjectError: func() error { return nil }, - }, - client.FakeResponse{ - InjectResponse: func() interface{} { return nil }, - InjectError: func() error { return nil }, - }, - )}, - }, - }, - args: args{ctx: context.TODO(), masterIndex: 0, log: logr.Discard()}, - want: []string{"redis://127.0.0.1:1000", "redis://127.0.0.1:2000", "redis://127.0.0.1:3000"}, - wantErr: false, - }, - { - name: "No configuration needed", - fields: fields{ - Name: "All redis servers configured", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:1000", - Host: "127.0.0.1", - Port: "1000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"master"} - }, - InjectError: func() error { return nil }, - }, - client.FakeResponse{ - InjectResponse: func() interface{} { return nil }, - InjectError: func() error { return nil }, - }, - ), - }, - { - Name: "redis://127.0.0.1:2000", - Host: "127.0.0.1", - Port: "2000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "10.0.0.1"} - }, - InjectError: func() error { return nil }, - }, - client.FakeResponse{ - InjectResponse: func() interface{} { return nil }, - InjectError: func() error { return nil }, - }, - )}, - { - Name: "redis://127.0.0.1:3000", - Host: "127.0.0.1", - Port: "3000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"slave", "10.0.0.1"} - }, - InjectError: func() error { return nil }, - }, - client.FakeResponse{ - InjectResponse: func() interface{} { return nil }, - InjectError: func() error { return nil }, - }, - )}, - }, - }, - args: args{ctx: context.TODO(), masterIndex: 0, log: logr.Discard()}, - want: []string{}, - wantErr: false, - }, - { - name: "Returns error", - fields: fields{ - Name: "All redis servers configured", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:1000", - Host: "127.0.0.1", - Port: "1000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { return []interface{}{} }, - InjectError: func() error { return errors.New("error") }, - }, - ), - }, - }, - }, - args: args{ctx: context.TODO(), masterIndex: 0, log: logr.Discard()}, - want: []string{}, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &Shard{ - Name: tt.fields.Name, - Servers: tt.fields.Servers, - } - got, err := s.Init(tt.args.ctx, tt.args.masterIndex, tt.args.log) - if (err != nil) != tt.wantErr { - 
t.Errorf("Shard.Init() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Shard.Init() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestNewShardedCluster(t *testing.T) { - type args struct { - ctx context.Context - serverList map[string][]string - log logr.Logger - } - tests := []struct { - name string - args args - want ShardedCluster - wantErr bool - }{ - { - name: "Returns a new ShardedCluster object", - args: args{ - ctx: context.TODO(), - serverList: map[string][]string{ - "shard00": {"redis://127.0.0.1:1000", "redis://127.0.0.1:2000"}, - "shard01": {"redis://127.0.0.1:3000", "redis://127.0.0.1:4000"}, - }, - log: logr.Discard(), - }, - want: ShardedCluster{ - { - Name: "shard00", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:1000", - Host: "127.0.0.1", - Port: "1000", - Role: client.Unknown, - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:1000"); return c }(), - }, - { - Name: "redis://127.0.0.1:2000", - Host: "127.0.0.1", - Port: "2000", - Role: client.Unknown, - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:2000"); return c }(), - }, - }, - }, - { - Name: "shard01", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:3000", - Host: "127.0.0.1", - Port: "3000", - Role: client.Unknown, - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:3000"); return c }(), - }, - { - Name: "redis://127.0.0.1:4000", - Host: "127.0.0.1", - Port: "4000", - Role: client.Unknown, - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:4000"); return c }(), - }, - }, - }, - }, - wantErr: false, - }, - { - name: "Returns error", - args: args{ - ctx: context.TODO(), - serverList: map[string][]string{ - "shard00": {"redis://127.0.0.1:1000", "redis://127.0.0.1:2000"}, - "shard01": {"127.0.0.1:3000", "redis://127.0.0.1:4000"}, - }, - log: logr.Discard(), - }, - want: nil, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := NewShardedCluster(tt.args.ctx, tt.args.serverList, tt.args.log) - if (err != nil) != tt.wantErr { - t.Errorf("NewShardedCluster() error = %v, wantErr %v", err, tt.wantErr) - return - } - sort.SliceStable(got, func(i, j int) bool { - return got[i].Name < got[j].Name - }) - if diff := deep.Equal(got, tt.want); len(diff) > 0 { - t.Errorf("NewShardedCluster() got diff: %v", diff) - } - }) - } -} - -func TestShardedCluster_Discover(t *testing.T) { - type args struct { - ctx context.Context - log logr.Logger - } - tests := []struct { - name string - sc ShardedCluster - args args - wantErr bool - }{ - { - name: "Discovers characteristics of all servers in the ShardedCluster", - sc: ShardedCluster{ - { - Name: "shard00", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:1000", - Host: "127.0.0.1", - Port: "1000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"master"} - }, - InjectError: func() error { return nil }, - }, - )}, - }, - }, - { - Name: "shard01", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:3000", - Host: "127.0.0.1", - Port: "3000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{"master"} - }, - InjectError: func() error { return nil }, - }, - )}, - }, - }, - }, - args: args{ctx: 
context.TODO(), log: logr.Discard()}, - wantErr: false, - }, - { - name: "Returns error", - sc: ShardedCluster{ - { - Name: "shard00", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:1000", - Host: "127.0.0.1", - Port: "1000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD( - client.FakeResponse{ - InjectResponse: func() interface{} { return []interface{}{} }, - InjectError: func() error { return errors.New("error") }, - }, - )}, - }, - }, - { - Name: "shard01", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:3000", - Host: "127.0.0.1", - Port: "3000", - Role: client.Unknown, - CRUD: crud.NewFakeCRUD()}, - }, - }, - }, - args: args{ctx: context.TODO(), log: logr.Discard()}, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := tt.sc.Discover(tt.args.ctx, tt.args.log); (err != nil) != tt.wantErr { - t.Errorf("ShardedCluster.Discover() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestShardedCluster_GetShardNames(t *testing.T) { - tests := []struct { - name string - sc ShardedCluster - want []string - }{ - { - name: "Returns the shrard names as a slice of strings", - sc: ShardedCluster{ - { - Name: "shard00", - Servers: []RedisServer{}, - }, - { - Name: "shard01", - Servers: []RedisServer{}, - }, - { - Name: "shard02", - Servers: []RedisServer{}, - }, - }, - want: []string{"shard00", "shard01", "shard02"}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tt.sc.GetShardNames(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("ShardedCluster.GetShardNames() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestShardedCluster_GetShardByName(t *testing.T) { - type args struct { - name string - } - tests := []struct { - name string - sc ShardedCluster - args args - want *Shard - }{ - { - name: "Returns the shard of the given name", - sc: ShardedCluster{ - { - Name: "shard00", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:1000", - Host: "127.0.0.1", - Port: "1000", - Role: client.Unknown, - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:1000"); return c }(), - }, - }, - }, - { - Name: "shard01", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:2000", - Host: "127.0.0.1", - Port: "3000", - Role: client.Unknown, - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:2000"); return c }(), - }, - }, - }, - }, - args: args{ - name: "shard01", - }, - want: &Shard{ - Name: "shard01", - Servers: []RedisServer{ - { - Name: "redis://127.0.0.1:2000", - Host: "127.0.0.1", - Port: "3000", - Role: client.Unknown, - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:2000"); return c }(), - }, - }, - }, - }, - { - name: "Returns nil if not found", - sc: ShardedCluster{}, - args: args{ - name: "shard01", - }, - want: nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if diff := deep.Equal(tt.sc.GetShardByName(tt.args.name), tt.want); len(diff) > 0 { - t.Errorf("ShardedCluster.GetShardByName() got diff: %v", diff) - } - }) - } -} - -func TestRedisServer_IP(t *testing.T) { - type fields struct { - Name string - Host string - Port string - Role client.Role - ReadOnly bool - CRUD *crud.CRUD - } - tests := []struct { - name string - fields fields - want string - wantErr bool - }{ - { - name: "Returns the IP", - fields: fields{ - Name: "test", - Host: "10.0.0.0", - Port: "3333", - Role: "", - ReadOnly: false, - CRUD: 
&crud.CRUD{}, - }, - want: "10.0.0.0", - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rs := &RedisServer{ - Name: tt.fields.Name, - Host: tt.fields.Host, - Port: tt.fields.Port, - Role: tt.fields.Role, - CRUD: tt.fields.CRUD, - } - got, err := rs.IP() - if (err != nil) != tt.wantErr { - t.Errorf("RedisServer.IP() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("RedisServer.IP() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/redis/sentinel_pool.go b/pkg/redis/sentinel_pool.go deleted file mode 100644 index 55cbaf73..00000000 --- a/pkg/redis/sentinel_pool.go +++ /dev/null @@ -1,135 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "reflect" - "sort" - - saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1" - "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" -) - -// SentinelPool represents a pool of SentinelServers that monitor the same -// group of redis shards -type SentinelPool []SentinelServer - -// NewSentinelPool creates a new SentinelPool object given a key and a number of replicas by calling the k8s API -// to discover sentinel Pods. The kye es the Name/Namespace of the StatefulSet that owns the sentinel Pods. -func NewSentinelPool(ctx context.Context, cl client.Client, key types.NamespacedName, replicas int) (SentinelPool, error) { - - spool := make([]SentinelServer, replicas) - for i := 0; i < replicas; i++ { - pod := &corev1.Pod{} - key := types.NamespacedName{Name: fmt.Sprintf("%s-%d", key.Name, i), Namespace: key.Namespace} - err := cl.Get(ctx, key, pod) - if err != nil { - return nil, err - } - - ss, err := NewSentinelServerFromConnectionString(pod.GetName(), fmt.Sprintf("redis://%s:%d", pod.Status.PodIP, saasv1alpha1.SentinelPort)) - if err != nil { - return nil, err - } - spool[i] = *ss - } - return spool, nil -} - -// Cleanup closes all Redis clients opened during the SentinelPool object creation -func (sp SentinelPool) Cleanup(log logr.Logger) []error { - log.V(1).Info("[@sentinel-pool-cleanup] closing clients") - var closeErrors []error - for _, ss := range sp { - if err := ss.Cleanup(log); err != nil { - closeErrors = append(closeErrors, err) - } - } - return closeErrors -} - -// IsMonitoringShards checks whether or all the shards in the passed list are being monitored by all -// sentinel servers in the SentinelPool -func (sp SentinelPool) IsMonitoringShards(ctx context.Context, shards []string) (bool, error) { - - for _, ss := range sp { - ok, err := ss.IsMonitoringShards(ctx, shards) - if err != nil { - return false, err - } - if !ok { - return false, nil - } - } - - return true, nil -} - -// Monitor ensures that all the shards in the ShardedCluster object are monitored by -// all sentinel servers in the SentinelPool -func (sp SentinelPool) Monitor(ctx context.Context, shards ShardedCluster) (map[string][]string, error) { - changes := map[string][]string{} - for _, ss := range sp { - ssChanges, err := ss.Monitor(ctx, shards) - if err != nil { - return changes, err - } - if len(ssChanges) > 0 { - changes[ss.Name] = ssChanges - } - } - return changes, nil -} - -// MonitoredShards returns the list of monitored shards of this SentinelServer -func (sp SentinelPool) MonitoredShards(ctx context.Context, quorum int, options ...ShardDiscoveryOption) (saasv1alpha1.MonitoredShards, error) { - logger := log.FromContext(ctx, "function", 
"(SentinelPool).MonitoredShards") - responses := make([]saasv1alpha1.MonitoredShards, 0, len(sp)) - - for _, srv := range sp { - - resp, err := srv.MonitoredShards(ctx, options...) - if err != nil { - logger.Error(err, "error getting monitored shards from sentinel", "SentinelServer", srv.Name) - // jump to next sentinel if error occurs - continue - } - responses = append(responses, resp) - } - - monitoredShards, err := applyQuorum(responses, saasv1alpha1.SentinelDefaultQuorum) - if err != nil { - return nil, err - } - - return monitoredShards, nil -} - -func applyQuorum(responses []saasv1alpha1.MonitoredShards, quorum int) (saasv1alpha1.MonitoredShards, error) { - - for _, r := range responses { - // Sort each of the MonitoredShards responses to - // avoid diffs due to unordered responses from redis - sort.Sort(r) - } - - for idx, a := range responses { - count := 0 - for _, b := range responses { - if reflect.DeepEqual(a, b) { - count++ - } - } - - // check if this response has quorum - if count >= quorum { - return responses[idx], nil - } - } - - return nil, fmt.Errorf("no quorum of %d sentinels when getting monitored shards", saasv1alpha1.SentinelDefaultQuorum) -} diff --git a/pkg/redis/sentinel_pool_test.go b/pkg/redis/sentinel_pool_test.go deleted file mode 100644 index 1a926ced..00000000 --- a/pkg/redis/sentinel_pool_test.go +++ /dev/null @@ -1,775 +0,0 @@ -package redis - -import ( - "context" - "errors" - "reflect" - "testing" - - saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1" - "github.com/3scale/saas-operator/pkg/redis/crud" - redis "github.com/3scale/saas-operator/pkg/redis/crud/client" - "github.com/go-test/deep" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - k8s "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestNewSentinelPool(t *testing.T) { - type args struct { - ctx context.Context - cl client.Client - key types.NamespacedName - replicas int - } - tests := []struct { - name string - args args - want SentinelPool - wantErr bool - }{ - { - name: "Returns a SentinelPool object", - args: args{ - ctx: context.TODO(), - cl: k8s.NewClientBuilder().WithScheme(s).WithObjects( - &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "sentinel-0", Namespace: "test"}, - Status: corev1.PodStatus{PodIP: "127.0.0.1"}, - }, - &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "sentinel-1", Namespace: "test"}, - Status: corev1.PodStatus{PodIP: "127.0.0.2"}, - }, - &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "sentinel-2", Namespace: "test"}, - Status: corev1.PodStatus{PodIP: "127.0.0.3"}, - }, - ).Build(), - key: types.NamespacedName{Name: "sentinel", Namespace: "test"}, - replicas: 3, - }, - want: []SentinelServer{ - { - Name: "sentinel-0", - IP: "127.0.0.1", - Port: "26379", - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:26379"); return c }(), - }, - { - Name: "sentinel-1", - IP: "127.0.0.2", - Port: "26379", - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.2:26379"); return c }(), - }, - { - Name: "sentinel-2", - IP: "127.0.0.3", - Port: "26379", - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.3:26379"); return c }(), - }, - }, - wantErr: false, - }, - { - name: "Pod not found, returns error", - args: args{ - ctx: context.TODO(), - cl: k8s.NewClientBuilder().WithScheme(s).WithObjects( - &corev1.Pod{ - ObjectMeta: 
metav1.ObjectMeta{Name: "sentinel-0", Namespace: "test"}, - Status: corev1.PodStatus{PodIP: "127.0.0.1"}, - }, - &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "sentinel-1", Namespace: "test"}, - Status: corev1.PodStatus{PodIP: "127.0.0.2"}, - }, - ).Build(), - key: types.NamespacedName{Name: "sentinel", Namespace: "test"}, - replicas: 3, - }, - want: nil, - wantErr: true, - }, - { - name: "Pod not found, returns error", - args: args{ - ctx: context.TODO(), - cl: k8s.NewClientBuilder().WithScheme(s).WithObjects( - &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "sentinel-0", Namespace: "test"}, - Status: corev1.PodStatus{PodIP: "127.0.0.1:wrong"}, - }, - ).Build(), - key: types.NamespacedName{Name: "sentinel", Namespace: "test"}, - replicas: 1, - }, - want: nil, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := NewSentinelPool(tt.args.ctx, tt.args.cl, tt.args.key, tt.args.replicas) - if (err != nil) != tt.wantErr { - t.Errorf("NewSentinelPool() error = %v, wantErr %v", err, tt.wantErr) - return - } - if diff := deep.Equal(got, tt.want); len(diff) > 0 { - t.Errorf("NewSentinelServer() got diff: %v", diff) - } - }) - } -} - -func TestSentinelPool_IsMonitoringShards(t *testing.T) { - type args struct { - ctx context.Context - shards []string - } - tests := []struct { - name string - sp SentinelPool - args args - want bool - wantErr bool - }{ - { - name: "Returns true", - sp: []SentinelServer{ - { - Name: "sentinel-0", - IP: "127.0.0.1", - Port: "26379", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{"name", "shard00"}, - []interface{}{"name", "shard01"}, - } - }, - InjectError: func() error { return nil }, - }), - }, - { - Name: "sentinel-1", - IP: "127.0.0.2", - Port: "26379", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{"name", "shard00"}, - []interface{}{"name", "shard01"}, - } - }, - InjectError: func() error { return nil }, - }), - }, - { - Name: "sentinel-2", - IP: "127.0.0.3", - Port: "26379", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{"name", "shard00"}, - []interface{}{"name", "shard01"}, - } - }, - InjectError: func() error { return nil }, - }), - }, - }, - args: args{ - ctx: context.TODO(), - shards: []string{"shard00", "shard01"}, - }, - want: true, - wantErr: false, - }, - { - name: "Returns false", - sp: []SentinelServer{ - { - Name: "sentinel-0", - IP: "127.0.0.1", - Port: "26379", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{"name", "shard00"}, - } - }, - InjectError: func() error { return nil }, - }), - }, - { - Name: "sentinel-1", - IP: "127.0.0.2", - Port: "26379", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{"name", "shard00"}, - []interface{}{"name", "shard01"}, - } - }, - InjectError: func() error { return nil }, - }), - }, - { - Name: "sentinel-2", - IP: "127.0.0.3", - Port: "26379", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{"name", "shard00"}, - []interface{}{"name", "shard01"}, - } - }, - InjectError: func() error { return nil }, - }), - }, - }, - args: args{ - ctx: context.TODO(), - shards: []string{"shard00", "shard01"}, - }, - want: 
false, - wantErr: false, - }, - { - name: "Returns false", - sp: []SentinelServer{ - { - Name: "sentinel-0", - IP: "127.0.0.1", - Port: "26379", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{} - }, - InjectError: func() error { return nil }, - }), - }, - { - Name: "sentinel-1", - IP: "127.0.0.2", - Port: "26379", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{} - }, - InjectError: func() error { return nil }, - }), - }, - { - Name: "sentinel-2", - IP: "127.0.0.3", - Port: "26379", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{} - }, - InjectError: func() error { return nil }, - }), - }, - }, - args: args{ - ctx: context.TODO(), - shards: []string{"shard00", "shard01"}, - }, - want: false, - wantErr: false, - }, - { - name: "Returns error", - sp: []SentinelServer{ - { - Name: "sentinel-0", - IP: "127.0.0.1", - Port: "26379", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{} - }, - InjectError: func() error { return errors.New("error") }, - }), - }, - }, - args: args{ - ctx: context.TODO(), - shards: []string{"shard00", "shard01"}, - }, - want: false, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.sp.IsMonitoringShards(tt.args.ctx, tt.args.shards) - if (err != nil) != tt.wantErr { - t.Errorf("SentinelPool.IsMonitoringShards() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("SentinelPool.IsMonitoringShards() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestSentinelPool_Monitor(t *testing.T) { - type args struct { - ctx context.Context - shards ShardedCluster - } - tests := []struct { - name string - sp SentinelPool - args args - want map[string][]string - wantErr bool - }{ - { - name: "No changes", - sp: []SentinelServer{ - { - Name: "sentinel-0", - IP: "127.0.0.1", - Port: "26379", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{ - Name: "shard00", - IP: "127.0.0.1", - Port: 2000, - } - }, - InjectError: func() error { return nil }, - }, - ), - }, - { - Name: "sentinel-1", - IP: "127.0.0.2", - Port: "26379", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{ - Name: "shard00", - IP: "127.0.0.2", - Port: 3000, - } - }, - InjectError: func() error { return nil }, - }, - ), - }, - { - Name: "sentinel-2", - IP: "127.0.0.3", - Port: "26379", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{ - Name: "shard00", - IP: "127.0.0.3", - Port: 4000, - } - }, - InjectError: func() error { return nil }, - }, - ), - }, - }, - args: args{ - ctx: context.TODO(), - shards: ShardedCluster{{ - Name: "shard00", - Servers: []RedisServer{ - { - Name: "shard00-0", - Host: "127.0.0.1", - Port: "2000", - Role: redis.Master, - CRUD: nil, - }, - { - Name: "shard00-1", - Host: "127.0.0.1", - Port: "2001", - Role: redis.Slave, - CRUD: nil, - }, - { - Name: "shard00-2", - Host: "127.0.0.1", - Port: "2002", - Role: redis.Slave, - CRUD: nil, - }, - }}, - }, - }, - want: map[string][]string{}, - wantErr: false, - }, - { - name: 
"Returns changes for all sentinel servers", - sp: []SentinelServer{ - { - Name: "sentinel-0", - IP: "127.0.0.1", - Port: "26379", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{} - }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - ), - }, - { - Name: "sentinel-1", - IP: "127.0.0.2", - Port: "26379", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{} - }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - ), - }, - { - Name: "sentinel-2", - IP: "127.0.0.3", - Port: "26379", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{} - }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - ), - }, - }, - args: args{ - ctx: context.TODO(), - shards: ShardedCluster{{ - Name: "shard00", - Servers: []RedisServer{ - { - Name: "shard00-0", - Host: "127.0.0.1", - Port: "2000", - Role: redis.Master, - CRUD: nil, - }, - { - Name: "shard00-1", - Host: "127.0.0.1", - Port: "2001", - Role: redis.Slave, - CRUD: nil, - }, - { - Name: "shard00-2", - Host: "127.0.0.1", - Port: "2002", - Role: redis.Slave, - CRUD: nil, - }, - }}, - }, - }, - want: map[string][]string{ - "sentinel-0": {"shard00"}, - "sentinel-1": {"shard00"}, - "sentinel-2": {"shard00"}, - }, - wantErr: false, - }, - { - name: "Error returned by sentinel-1, sentinel-0 changed", - sp: []SentinelServer{ - { - Name: "sentinel-0", - IP: "127.0.0.1", - Port: "26379", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{} - }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - ), - }, - { - Name: "sentinel-1", - IP: "127.0.0.2", - Port: "26379", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{} - }, - InjectError: func() error { return errors.New("error") }, - }, - ), - }, - { - Name: "sentinel-2", - IP: "127.0.0.3", - Port: "26379", - // function code should error before reaching sentinel-2 
- CRUD: crud.NewFakeCRUD(), - }, - }, - args: args{ - ctx: context.TODO(), - shards: ShardedCluster{{ - Name: "shard00", - Servers: []RedisServer{ - { - Name: "shard00-0", - Host: "127.0.0.1", - Port: "2000", - Role: redis.Master, - CRUD: nil, - }, - { - Name: "shard00-1", - Host: "127.0.0.1", - Port: "2001", - Role: redis.Slave, - CRUD: nil, - }, - { - Name: "shard00-2", - Host: "127.0.0.1", - Port: "2002", - Role: redis.Slave, - CRUD: nil, - }, - }}, - }, - }, - want: map[string][]string{ - "sentinel-0": {"shard00"}, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.sp.Monitor(tt.args.ctx, tt.args.shards) - if (err != nil) != tt.wantErr { - t.Errorf("SentinelPool.Monitor() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("SentinelPool.Monitor() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_applyQuorum(t *testing.T) { - type args struct { - responses []saasv1alpha1.MonitoredShards - quorum int - } - tests := []struct { - name string - args args - want saasv1alpha1.MonitoredShards - wantErr bool - }{ - { - name: "Should return the accepted response", - args: args{ - responses: []saasv1alpha1.MonitoredShards{ - { - { - Name: "shard01", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.1:1111": {Role: redis.Master, Config: map[string]string{}}}, - }, - { - Name: "shard02", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.2:2222": {Role: redis.Master, Config: map[string]string{}}, - }, - }, - { - Name: "shard03", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.3:3333": {Role: redis.Master, Config: map[string]string{}}, - }, - }, - }, - { - { - Name: "shard03", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.3:3333": {Role: redis.Master, Config: map[string]string{}}, - }, - }, - { - Name: "shard01", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.1:1111": {Role: redis.Master, Config: map[string]string{}}}, - }, - { - Name: "shard02", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.2:2222": {Role: redis.Master, Config: map[string]string{}}, - }, - }, - }, - }, - quorum: 2, - }, - want: []saasv1alpha1.MonitoredShard{ - { - Name: "shard01", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.1:1111": {Role: redis.Master, Config: map[string]string{}}}, - }, - { - Name: "shard02", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.2:2222": {Role: redis.Master, Config: map[string]string{}}, - }, - }, - { - Name: "shard03", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.3:3333": {Role: redis.Master, Config: map[string]string{}}, - }, - }, - }, - wantErr: false, - }, - { - name: "Should fail, no quorum", - args: args{ - responses: []saasv1alpha1.MonitoredShards{ - { - { - Name: "shard01", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.1:1111": {Role: redis.Master, Config: map[string]string{}}}, - }, - { - Name: "shard02", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.2:2222": {Role: redis.Master, Config: map[string]string{}}, - }, - }, - { - Name: "shard03", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.2:3333": {Role: redis.Master, Config: map[string]string{}}, - }, - }, - }, - { - { - Name: "shard01", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.1:1111": {Role: redis.Master, Config: map[string]string{}}}, - }, - { - Name: "shard02", - 
Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.4:4444": {Role: redis.Master, Config: map[string]string{}}, - }, - }, - { - Name: "shard03", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.3:3333": {Role: redis.Master, Config: map[string]string{}}, - }, - }, - }, - }, - quorum: 2, - }, - want: nil, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := applyQuorum(tt.args.responses, tt.args.quorum) - if (err != nil) != tt.wantErr { - t.Errorf("applyQuorum() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("applyQuorum() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/redis/sentinel_server.go b/pkg/redis/sentinel_server.go deleted file mode 100644 index 8bc845e8..00000000 --- a/pkg/redis/sentinel_server.go +++ /dev/null @@ -1,292 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "strings" - "time" - - saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1" - "github.com/3scale/saas-operator/pkg/redis/crud" - redis_client "github.com/3scale/saas-operator/pkg/redis/crud/client" - "github.com/3scale/saas-operator/pkg/util" - "github.com/go-logr/logr" - "sigs.k8s.io/controller-runtime/pkg/log" -) - -const ( - shardNotInitializedError = "ERR No such master with that name" - maxInfoCacheAge = 10 * time.Second -) - -// SentinelServer represents a sentinel Pod -type SentinelServer struct { - Name string - IP string - Port string - CRUD *crud.CRUD - MonitoredRedisServers map[string]*RedisServer -} - -func NewSentinelServerFromConnectionString(name, connectionString string) (*SentinelServer, error) { - - crud, err := crud.NewRedisCRUDFromConnectionString(connectionString) - if err != nil { - return nil, err - } - - return &SentinelServer{Name: name, IP: crud.GetIP(), Port: crud.GetPort(), CRUD: crud}, nil -} - -// Cleanup closes all Redis clients opened during the SentinelServer object creation -func (ss *SentinelServer) Cleanup(log logr.Logger) error { - log.V(2).Info("[@sentinel-server-cleanup] closing client", - "server", ss.Name, "host", ss.IP, - ) - if err := ss.CRUD.CloseClient(); err != nil { - log.Error(err, "[@sentinel-server-cleanup] error closing server client", - "server", ss.Name, "host", ss.IP, - ) - return err - } - return nil -} - -// IsMonitoringShards checks whether or all the shards in the passed list are being monitored by the SentinelServer -func (ss *SentinelServer) IsMonitoringShards(ctx context.Context, shards []string) (bool, error) { - - monitoredShards, err := ss.CRUD.SentinelMasters(ctx) - if err != nil { - return false, err - } - - if len(monitoredShards) == 0 { - return false, nil - } - - for _, name := range shards { - found := false - for _, monitored := range monitoredShards { - if monitored.Name == name { - found = true - } - } - if !found { - return false, nil - } - } - - return true, nil -} - -// Monitor ensures that all the shards in the ShardedCluster object are monitored by the SentinelServer -func (ss *SentinelServer) Monitor(ctx context.Context, shards ShardedCluster) ([]string, error) { - changed := []string{} - - // Initialize unmonitored shards - shardNames := shards.GetShardNames() - for _, name := range shardNames { - - _, err := ss.CRUD.SentinelMaster(ctx, name) - if err != nil { - if err.Error() == shardNotInitializedError { - - shard := shards.GetShardByName(name) - host, port, err := shard.GetMasterAddr() - if err != nil { - return changed, err - } - - err = 
ss.CRUD.SentinelMonitor(ctx, name, host, port, saasv1alpha1.SentinelDefaultQuorum) - if err != nil { - return changed, util.WrapError("redis-sentinel/SentinelServer.Monitor", err) - } - // even if the next call fails, there has already been a write operation to sentinel - changed = append(changed, name) - - err = ss.CRUD.SentinelSet(ctx, name, "down-after-milliseconds", "5000") - if err != nil { - return changed, util.WrapError("redis-sentinel/SentinelServer.Monitor", err) - } - // TODO: change the default failover timeout. - // TODO: maybe add a generic mechanism to set/modify parameters - - } else { - return changed, err - } - } - } - - return changed, nil -} - -type ShardDiscoveryOption int - -const ( - OnlyMasterDiscoveryOpt ShardDiscoveryOption = iota - SlaveReadOnlyDiscoveryOpt - SaveConfigDiscoveryOpt -) - -func (sdos ShardDiscoveryOptions) Has(sdo ShardDiscoveryOption) bool { - for _, opt := range sdos { - if opt == sdo { - return true - } - } - return false -} - -type ShardDiscoveryOptions []ShardDiscoveryOption - -// MonitoredShards returns the list of monitored shards of this SentinelServer -func (ss *SentinelServer) MonitoredShards(ctx context.Context, options ...ShardDiscoveryOption) (saasv1alpha1.MonitoredShards, error) { - opts := ShardDiscoveryOptions(options) - - sm, err := ss.CRUD.SentinelMasters(ctx) - if err != nil { - return nil, err - } - - monitoredShards := make([]saasv1alpha1.MonitoredShard, 0, len(sm)) - for _, s := range sm { - - var servers map[string]saasv1alpha1.RedisServerDetails - servers, err = ss.DiscoverShard(ctx, s.Name, maxInfoCacheAge, opts) - if err != nil { - return nil, err - } - monitoredShards = append(monitoredShards, - saasv1alpha1.MonitoredShard{ - Name: s.Name, - Servers: servers, - }, - ) - } - return monitoredShards, nil -} - -func serverName(ip string, port int) string { - return fmt.Sprintf("%s:%d", ip, port) -} - -func connectionString(ip string, port int) string { - return fmt.Sprintf("redis://%s:%d", ip, port) -} - -func (ss *SentinelServer) DiscoverShard(ctx context.Context, shard string, maxInfoCacheAge time.Duration, - opts ShardDiscoveryOptions) (map[string]saasv1alpha1.RedisServerDetails, error) { - - logger := log.FromContext(ctx, "function", "(*SentinelServer).DiscoverShard()") - - ///////////////////////////////// - // discover the shard's master // - ///////////////////////////////// - - master, err := ss.CRUD.SentinelMaster(ctx, shard) - if err != nil { - logger.Error(err, fmt.Sprintf("unable to get master for shard %s", shard)) - return nil, err - } - - sn := serverName(master.IP, master.Port) - - // do not try to discover a master flagged as "s_down" or "o_down" - if strings.Contains(master.Flags, "s_down") || strings.Contains(master.Flags, "o_down") { - return nil, fmt.Errorf("%s master %s is s_down/o_down", shard, sn) - } - - result := map[string]saasv1alpha1.RedisServerDetails{ - sn: { - Role: redis_client.Master, - Config: map[string]string{}, - }, - } - - if opts.Has(SaveConfigDiscoveryOpt) { - - // open/reuse a client to the redis server - rs, err := ss.OpenDirectRedisConnection(ctx, master.IP, master.Port) - defer rs.Cleanup(log.FromContext(ctx)) - if err != nil { - logger.Error(err, fmt.Sprintf("unable to open client to master %s", sn)) - return nil, err - } - - save, err := rs.CRUD.RedisConfigGet(ctx, "save") - if err != nil { - logger.Error(err, fmt.Sprintf("unable to get master %s 'save' option", sn)) - return nil, err - } - result[sn].Config["save"] = save - } - - ///////////////////////////////// - // discover 
the shard's slaves // - ///////////////////////////////// - - if !opts.Has(OnlyMasterDiscoveryOpt) { - slaves, err := ss.CRUD.SentinelSlaves(ctx, shard) - if err != nil { - logger.Error(err, fmt.Sprintf("unable to get slaves for shard %s", shard)) - return nil, err - } - - for _, slave := range slaves { - - // do not try to discover slaves flagged as "s_down" or "o_down" - if !strings.Contains(slave.Flags, "s_down") && !strings.Contains(slave.Flags, "o_down") { - - sn := serverName(slave.IP, slave.Port) - result[sn] = saasv1alpha1.RedisServerDetails{ - Role: redis_client.Slave, - Config: map[string]string{}, - } - - if opts.Has(SaveConfigDiscoveryOpt) || opts.Has(SlaveReadOnlyDiscoveryOpt) { - - // open/reuse a client to the redis server - rs, err := ss.OpenDirectRedisConnection(ctx, slave.IP, slave.Port) - defer rs.Cleanup(log.FromContext(ctx)) - if err != nil { - logger.Error(err, fmt.Sprintf("unable to open client to slave %s", sn)) - return nil, err - } - - if opts.Has(SaveConfigDiscoveryOpt) { - save, err := rs.CRUD.RedisConfigGet(ctx, "save") - if err != nil { - logger.Error(err, fmt.Sprintf("unable to get slave %s 'save' option", sn)) - return nil, err - } - result[sn].Config["save"] = save - } - - if opts.Has(SlaveReadOnlyDiscoveryOpt) { - slaveReadOnly, err := rs.CRUD.RedisConfigGet(ctx, "slave-read-only") - if err != nil { - logger.Error(err, fmt.Sprintf("unable to get slave %s 'slave-read-only' option", sn)) - return nil, err - } - result[sn].Config["slave-read-only"] = slaveReadOnly - } - } - - } - } - } - - return result, nil -} - -func (ss *SentinelServer) OpenDirectRedisConnection(ctx context.Context, ip string, port int) (*RedisServer, error) { - sn := serverName(ip, port) - - // Check if a connection is already open for the ip:port - if rs, ok := ss.MonitoredRedisServers[sn]; ok { - return rs, nil - } - - // open a new connection to redis - return NewRedisServerFromConnectionString(sn, connectionString(ip, port)) -} diff --git a/pkg/redis/sentinel_server_test.go b/pkg/redis/sentinel_server_test.go deleted file mode 100644 index c282ef94..00000000 --- a/pkg/redis/sentinel_server_test.go +++ /dev/null @@ -1,1303 +0,0 @@ -package redis - -import ( - "context" - "errors" - "reflect" - "testing" - "time" - - saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1" - "github.com/3scale/saas-operator/pkg/redis/crud" - "github.com/3scale/saas-operator/pkg/redis/crud/client" - redis "github.com/3scale/saas-operator/pkg/redis/crud/client" - redis_client "github.com/3scale/saas-operator/pkg/redis/crud/client" - "github.com/go-test/deep" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes/scheme" -) - -var ( - s *runtime.Scheme = scheme.Scheme - testShardedCluster ShardedCluster = ShardedCluster{ - { - Name: "shard00", - Servers: []RedisServer{ - { - Name: "shard00-0", - Host: "127.0.0.1", - Port: "2000", - Role: redis.Master, - CRUD: nil, - }, - { - Name: "shard00-1", - Host: "127.0.0.1", - Port: "2001", - Role: redis.Slave, - CRUD: nil, - }, - { - Name: "shard00-2", - Host: "127.0.0.1", - Port: "2002", - Role: redis.Slave, - CRUD: nil, - }, - }, - }, - { - Name: "shard01", - Servers: []RedisServer{ - { - Name: "shard01-0", - Host: "127.0.0.1", - Port: "3000", - Role: redis.Master, - CRUD: nil, - }, - { - Name: "shard01-1", - Host: "127.0.0.1", - Port: "3001", - Role: redis.Slave, - CRUD: nil, - }, - { - Name: "shard01-2", - Host: "127.0.0.1", - Port: "3002", - Role: redis.Slave, - CRUD: nil, - }, - }, - }, - { - Name: "shard02", - Servers: []RedisServer{ - { - 
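// --- Editor's aside (illustrative sketch, not part of the diff) ---
// DiscoverShard() above takes a variadic list of ShardDiscoveryOption values
// and branches on ShardDiscoveryOptions.Has(). The same option-set pattern in
// isolation (names shortened; the three constants mirror the deleted ones):
package main

import "fmt"

type Option int

const (
	OnlyMaster Option = iota
	SlaveReadOnly
	SaveConfig
)

type Options []Option

// Has reports whether opt is present in the option set.
func (os Options) Has(opt Option) bool {
	for _, o := range os {
		if o == opt {
			return true
		}
	}
	return false
}

// discover sketches the two branch points used by the deleted DiscoverShard.
func discover(opts ...Option) {
	set := Options(opts)
	if set.Has(SaveConfig) {
		fmt.Println("fetch 'save' from each server")
	}
	if !set.Has(OnlyMaster) {
		fmt.Println("walk the shard's slaves too")
	}
}

func main() {
	discover(SaveConfig) // prints both lines
}
// --- end aside ---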
Name: "shard02-0", - Host: "127.0.0.1", - Port: "4000", - Role: redis.Master, - CRUD: nil, - }, - { - Name: "shard02-1", - Host: "127.0.0.1", - Port: "4001", - Role: redis.Slave, - CRUD: nil, - }, - { - Name: "shard02-2", - Host: "127.0.0.1", - Port: "4002", - Role: redis.Slave, - CRUD: nil, - }, - }, - }, - } -) - -func init() { - deep.CompareUnexportedFields = true - s.AddKnownTypes(saasv1alpha1.GroupVersion) -} - -func TestNewSentinelServerFromConnectionString(t *testing.T) { - type args struct { - name string - connectionString string - } - tests := []struct { - name string - args args - want *SentinelServer - wantErr bool - }{ - { - name: "Returns a SentinelServer object", - args: args{ - name: "redis://127.0.0.1:6379", - connectionString: "redis://127.0.0.1:6379", - }, - want: &SentinelServer{ - Name: "redis://127.0.0.1:6379", - CRUD: func() *crud.CRUD { c, _ := crud.NewRedisCRUDFromConnectionString("redis://127.0.0.1:6379"); return c }(), - Port: "6379", - IP: "127.0.0.1", - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := NewSentinelServerFromConnectionString(tt.args.name, tt.args.connectionString) - if (err != nil) != tt.wantErr { - t.Errorf("NewSentinelServerFromConnectionString() error = %v, wantErr %v", err, tt.wantErr) - return - } - if diff := deep.Equal(got, tt.want); len(diff) > 0 { - t.Errorf("NewSentinelServer() got diff: %v", diff) - } - }) - } -} - -func TestSentinelServer_IsMonitoringShards(t *testing.T) { - type args struct { - ctx context.Context - shards []string - } - tests := []struct { - name string - ss *SentinelServer - args args - want bool - wantErr bool - }{ - { - name: "All shards monitored by SentinelServer", - ss: &SentinelServer{ - Name: "test-server", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{"name", "shard01"}, - []interface{}{"name", "shard02"}, - } - }, - InjectError: func() error { return nil }, - }), - }, - args: args{ - ctx: context.TODO(), - shards: []string{"shard01", "shard02"}, - }, - want: true, - wantErr: false, - }, - { - name: "No shard monitored", - ss: &SentinelServer{ - Name: "test-server", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { return []interface{}{} }, - InjectError: func() error { return nil }, - }), - }, - args: args{ - ctx: context.TODO(), - shards: []string{"shard01", "shard02"}, - }, - want: false, - wantErr: false, - }, - { - name: "One shard is not monitored", - ss: &SentinelServer{ - Name: "test-server", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{"name", "shard01"}, - } - }, - InjectError: func() error { return nil }, - }), - }, - args: args{ - ctx: context.TODO(), - shards: []string{"shard01", "shard02"}, - }, - want: false, - wantErr: false, - }, - { - name: "Returns an error", - ss: &SentinelServer{ - Name: "test-server", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { return []interface{}{} }, - InjectError: func() error { return errors.New("error") }, - }), - }, - args: args{ - ctx: context.TODO(), - shards: []string{"shard01", "shard02"}, - }, - want: false, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.ss.IsMonitoringShards(tt.args.ctx, tt.args.shards) - if (err != nil) != tt.wantErr { - t.Errorf("SentinelServer.IsMonitoringShards() error = %v, wantErr %v", 
err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("SentinelServer.IsMonitoringShards() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestSentinelServer_Monitor(t *testing.T) { - type fields struct { - Name string - IP string - Port string - CRUD *crud.CRUD - } - type args struct { - ctx context.Context - shards ShardedCluster - } - tests := []struct { - name string - fields fields - args args - want []string - wantErr bool - }{ - { - name: "All shards monitored", - fields: fields{ - Name: "test-server", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{ - Name: "shard00", - IP: "127.0.0.1", - Port: 2000, - } - }, - InjectError: func() error { return nil }, - }, - // SentinelMaster response for shard01 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{ - Name: "shard01", - IP: "127.0.0.1", - Port: 3000, - } - }, - InjectError: func() error { return nil }, - }, - // SentinelMaster response for shard02 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{ - Name: "shard02", - IP: "127.0.0.1", - Port: 4000, - } - }, - InjectError: func() error { return nil }, - }, - ), - }, - args: args{ - ctx: context.TODO(), - shards: testShardedCluster, - }, - want: []string{}, - wantErr: false, - }, - { - name: "shard01 is not monitored", - fields: fields{ - Name: "test-server", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{ - Name: "shard00", - IP: "127.0.0.1", - Port: 2000, - } - }, - InjectError: func() error { return nil }, - }, - // SentinelMaster response for shard01 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { return &redis.SentinelMasterCmdResult{} }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard01 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard01 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelMaster response for shard02 - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{ - Name: "shard02", - IP: "127.0.0.1", - Port: 4000, - } - }, - InjectError: func() error { return nil }, - }, - ), - }, - args: args{ - ctx: context.TODO(), - shards: testShardedCluster, - }, - want: []string{"shard01"}, - wantErr: false, - }, - { - name: "all shards are unmonitored", - fields: fields{ - Name: "test-server", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { return &redis.SentinelMasterCmdResult{} }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelMaster response for shard01 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { return &redis.SentinelMasterCmdResult{} }, - 
InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard01 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard01 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelMaster response for shard02 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { return &redis.SentinelMasterCmdResult{} }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard02 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard02 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - ), - }, - args: args{ - ctx: context.TODO(), - shards: testShardedCluster, - }, - want: []string{"shard00", "shard01", "shard02"}, - wantErr: false, - }, - { - name: "All shards unmonitored, failure on the 2nd one", - fields: fields{ - Name: "test-server", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { return &redis.SentinelMasterCmdResult{} }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelMaster response for shard01 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { return &redis.SentinelMasterCmdResult{} }, - InjectError: func() error { return errors.New("error") }, - }, - // SentinelMaster response for shard02 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { return &redis.SentinelMasterCmdResult{} }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard02 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard02 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - ), - }, - args: args{ - ctx: context.TODO(), - shards: testShardedCluster, - }, - want: []string{"shard00"}, - wantErr: true, - }, - { - name: "All shards monitored, failure on the 2nd one", - fields: fields{ - Name: "test-server", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { - return &redis.SentinelMasterCmdResult{ - Name: "shard00", - IP: "127.0.0.1", - Port: 2000, - } - }, - InjectError: func() error { return nil }, - }, - // SentinelMaster response for shard01 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { return &redis.SentinelMasterCmdResult{} }, - InjectError: func() error { return errors.New("error") }, - }, - ), - }, - args: args{ - ctx: context.TODO(), - shards: testShardedCluster, - }, - want: []string{}, - wantErr: true, - }, - { - name: "'sentinel monitor' fails for shard00, returns no shards changed", - fields: fields{ - Name: "test-server", - CRUD: crud.NewFakeCRUD( - // 
SentinelMaster response for shard00 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { return &redis.SentinelMasterCmdResult{} }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return errors.New("error") }, - }, - ), - }, - args: args{ - ctx: context.TODO(), - shards: testShardedCluster, - }, - want: []string{}, - wantErr: true, - }, - { - name: "Error writing config param, returns shard00 changed", - fields: fields{ - Name: "test-server", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { return &redis.SentinelMasterCmdResult{} }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard01 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return errors.New("error") }, - }, - ), - }, - args: args{ - ctx: context.TODO(), - shards: testShardedCluster, - }, - want: []string{"shard00"}, - wantErr: true, - }, - { - name: "No master found, returns error, no shards changed", - fields: fields{ - Name: "test-server", - CRUD: crud.NewFakeCRUD( - // SentinelMaster response for shard00 (returns error as it is unmonitored) - redis.FakeResponse{ - InjectResponse: func() interface{} { return &redis.SentinelMasterCmdResult{} }, - InjectError: func() error { return errors.New(shardNotInitializedError) }, - }, - // SentinelMonitor response for shard00 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return nil }, - }, - // SentinelSet response for shard01 - redis.FakeResponse{ - InjectResponse: nil, - InjectError: func() error { return errors.New("error") }, - }, - ), - }, - args: args{ - ctx: context.TODO(), - shards: ShardedCluster{{ - Name: "shard00", - Servers: []RedisServer{ - { - Name: "shard00-0", - Host: "127.0.0.1", - Port: "2000", - Role: redis.Slave, - CRUD: nil, - }, - { - Name: "shard00-1", - Host: "127.0.0.1", - Port: "2001", - Role: redis.Slave, - CRUD: nil, - }, - { - Name: "shard00-2", - Host: "127.0.0.1", - Port: "2002", - Role: redis.Slave, - CRUD: nil, - }, - }, - }}, - }, - want: []string{}, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ss := &SentinelServer{ - Name: tt.fields.Name, - IP: tt.fields.IP, - Port: tt.fields.Port, - CRUD: tt.fields.CRUD, - } - - got, err := ss.Monitor(tt.args.ctx, tt.args.shards) - if (err != nil) != tt.wantErr { - t.Errorf("SentinelServer.Monitor() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("SentinelServer.Monitor() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestSentinelServer_MonitoredShards(t *testing.T) { - type args struct { - ctx context.Context - opts ShardDiscoveryOption - } - tests := []struct { - name string - ss *SentinelServer - args args - want saasv1alpha1.MonitoredShards - wantErr bool - }{ - { - name: "Returns all shards monitored by sentinel", - ss: &SentinelServer{ - Name: "test-server", - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: SentinelMasters() - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{"name", "shard01", "ip", 
"127.0.0.1", "port", "6379"}, - []interface{}{"name", "shard02", "ip", "127.0.0.2", "port", "6379"}, - } - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: SentinelMaster - InjectResponse: func() interface{} { - return &redis_client.SentinelMasterCmdResult{Name: "shard01", IP: "127.0.0.1", Port: 6379, Flags: "master"} - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: SentinelMaster - InjectResponse: func() interface{} { - return &redis_client.SentinelMasterCmdResult{Name: "shard02", IP: "127.0.0.2", Port: 6379, Flags: "master"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - args: args{ - ctx: context.TODO(), - opts: OnlyMasterDiscoveryOpt, - }, - want: saasv1alpha1.MonitoredShards{ - { - Name: "shard01", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.1:6379": {Role: client.Master, Config: map[string]string{}}}, - }, - { - Name: "shard02", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.2:6379": {Role: client.Master, Config: map[string]string{}}, - }, - }, - }, - wantErr: false, - }, - { - name: "Returns an error", - ss: &SentinelServer{ - Name: "test-server", - CRUD: crud.NewFakeCRUD(redis.FakeResponse{ - InjectResponse: func() interface{} { return []interface{}{} }, - InjectError: func() error { return errors.New("error") }, - }), - }, - args: args{ - ctx: context.TODO(), - opts: OnlyMasterDiscoveryOpt, - }, - want: nil, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.ss.MonitoredShards(tt.args.ctx, tt.args.opts) - if (err != nil) != tt.wantErr { - t.Errorf("SentinelServer.MonitoredShards() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("SentinelServer.MonitoredShards() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestSentinelServer_DiscoverShard(t *testing.T) { - type args struct { - ctx context.Context - shard string - maxInfoCacheAge time.Duration - opts ShardDiscoveryOptions - } - tests := []struct { - name string - ss *SentinelServer - rss []RedisServer - args args - want map[string]saasv1alpha1.RedisServerDetails - wantErr bool - }{ - { - name: "Discovers roles and config options within a shard (all available options)", - ss: &SentinelServer{ - Name: "tsentinel", - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: SentinelMaster - InjectResponse: func() interface{} { - return &redis_client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master"} - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: SentinelSlaves - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{ - "name", "127.0.0.1:2000", - "ip", "127.0.0.1", - "port", "2000", - "flags", "slave", - }, - []interface{}{ - "name", "127.0.0.1:3000", - "ip", "127.0.0.1", - "port", "3000", - "flags", "slave", - }, - []interface{}{ - "name", "127.0.0.1:4000", - "ip", "127.0.0.1", - "port", "4000", - "flags", "slave", - }, - } - }, - InjectError: func() error { return nil }, - }, - ), - MonitoredRedisServers: map[string]*RedisServer{ - // redis master - "127.0.0.1:1000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }), - }, - // redis slaves - "127.0.0.1:2000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: 
RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: RedisConfigGet("slave-read-only") - InjectResponse: func() interface{} { - return []interface{}{"slave-read-only", "yes"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - "127.0.0.1:3000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: RedisConfigGet("slave-read-only") - InjectResponse: func() interface{} { - return []interface{}{"slave-read-only", "yes"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - "127.0.0.1:4000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: RedisConfigGet("slave-read-only") - InjectResponse: func() interface{} { - return []interface{}{"slave-read-only", "no"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - }, - }, - args: args{ - opts: ShardDiscoveryOptions{SaveConfigDiscoveryOpt, SlaveReadOnlyDiscoveryOpt}, - }, - want: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.1:1000": { - Role: client.Master, - Config: map[string]string{"save": "900 1 300 10"}, - }, - "127.0.0.1:2000": { - Role: client.Slave, - Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, - }, - "127.0.0.1:3000": { - Role: client.Slave, - Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, - }, - "127.0.0.1:4000": { - Role: client.Slave, - Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "no"}, - }, - }, - wantErr: false, - }, - { - name: "Only discovers master, no config", - ss: &SentinelServer{ - Name: "tsentinel", - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: SentinelMaster - InjectResponse: func() interface{} { - return &redis_client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master"} - }, - InjectError: func() error { return nil }, - }, - ), - MonitoredRedisServers: map[string]*RedisServer{}, - }, - args: args{ - opts: ShardDiscoveryOptions{OnlyMasterDiscoveryOpt}, - }, - want: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.1:1000": { - Role: client.Master, - Config: map[string]string{}, - }, - }, - wantErr: false, - }, - { - name: "Discovers roles and slave-read-only option", - ss: &SentinelServer{ - Name: "tsentinel", - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: SentinelMaster - InjectResponse: func() interface{} { - return &redis_client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master"} - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: SentinelSlaves - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{ - "name", "127.0.0.1:2000", - "ip", "127.0.0.1", - "port", "2000", - "flags", "slave", - }, - []interface{}{ - "name", "127.0.0.1:3000", - "ip", "127.0.0.1", - "port", "3000", - "flags", "slave", - }, - []interface{}{ - "name", "127.0.0.1:4000", - "ip", "127.0.0.1", - "port", "4000", - "flags", "slave", - }, - } - }, - InjectError: func() error { return nil }, - }, - ), - MonitoredRedisServers: 
map[string]*RedisServer{ - // redis master - "127.0.0.1:1000": { - CRUD: crud.NewFakeCRUD(), - }, - // redis slaves - "127.0.0.1:2000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("slave-read-only") - InjectResponse: func() interface{} { - return []interface{}{"slave-read-only", "yes"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - "127.0.0.1:3000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("slave-read-only") - InjectResponse: func() interface{} { - return []interface{}{"slave-read-only", "yes"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - "127.0.0.1:4000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("slave-read-only") - InjectResponse: func() interface{} { - return []interface{}{"slave-read-only", "no"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - }, - }, - args: args{ - opts: ShardDiscoveryOptions{SlaveReadOnlyDiscoveryOpt}, - }, - want: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.1:1000": { - Role: client.Master, - Config: map[string]string{}, - }, - "127.0.0.1:2000": { - Role: client.Slave, - Config: map[string]string{"slave-read-only": "yes"}, - }, - "127.0.0.1:3000": { - Role: client.Slave, - Config: map[string]string{"slave-read-only": "yes"}, - }, - "127.0.0.1:4000": { - Role: client.Slave, - Config: map[string]string{"slave-read-only": "no"}, - }, - }, - wantErr: false, - }, - { - name: "Discovers roles and save option", - ss: &SentinelServer{ - Name: "tsentinel", - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: SentinelMaster - InjectResponse: func() interface{} { - return &redis_client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master"} - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: SentinelSlaves - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{ - "name", "127.0.0.1:2000", - "ip", "127.0.0.1", - "port", "2000", - "flags", "slave", - }, - []interface{}{ - "name", "127.0.0.1:3000", - "ip", "127.0.0.1", - "port", "3000", - "flags", "slave", - }, - []interface{}{ - "name", "127.0.0.1:4000", - "ip", "127.0.0.1", - "port", "4000", - "flags", "slave", - }, - } - }, - InjectError: func() error { return nil }, - }, - ), - MonitoredRedisServers: map[string]*RedisServer{ - // redis master - "127.0.0.1:1000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }, - )}, - // redis slaves - "127.0.0.1:2000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - "127.0.0.1:3000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - "127.0.0.1:4000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - }, - }, - args: args{ - opts: ShardDiscoveryOptions{SaveConfigDiscoveryOpt}, - }, - want: 
map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.1:1000": { - Role: client.Master, - Config: map[string]string{"save": "900 1 300 10"}, - }, - "127.0.0.1:2000": { - Role: client.Slave, - Config: map[string]string{"save": "900 1 300 10"}, - }, - "127.0.0.1:3000": { - Role: client.Slave, - Config: map[string]string{"save": "900 1 300 10"}, - }, - "127.0.0.1:4000": { - Role: client.Slave, - Config: map[string]string{"save": "900 1 300 10"}, - }, - }, - wantErr: false, - }, - { - name: "Avoids down slaves", - ss: &SentinelServer{ - Name: "tsentinel", - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: SentinelMaster - InjectResponse: func() interface{} { - return &redis_client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master"} - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: SentinelSlaves - InjectResponse: func() interface{} { - return []interface{}{ - []interface{}{ - "name", "127.0.0.1:2000", - "ip", "127.0.0.1", - "port", "2000", - "flags", "slave,s_down", - }, - []interface{}{ - "name", "127.0.0.1:3000", - "ip", "127.0.0.1", - "port", "3000", - "flags", "slave", - }, - []interface{}{ - "name", "127.0.0.1:4000", - "ip", "127.0.0.1", - "port", "4000", - "flags", "slave", - }, - } - }, - InjectError: func() error { return nil }, - }, - ), - MonitoredRedisServers: map[string]*RedisServer{ - // redis master - "127.0.0.1:1000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }), - }, - // redis slaves - "127.0.0.1:2000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: RedisConfigGet("slave-read-only") - InjectResponse: func() interface{} { - return []interface{}{"slave-read-only", "yes"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - "127.0.0.1:3000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: RedisConfigGet("slave-read-only") - InjectResponse: func() interface{} { - return []interface{}{"slave-read-only", "yes"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - "127.0.0.1:4000": { - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: RedisConfigGet("save") - InjectResponse: func() interface{} { - return []interface{}{"save", "900 1 300 10"} - }, - InjectError: func() error { return nil }, - }, - redis.FakeResponse{ - // cmd: RedisConfigGet("slave-read-only") - InjectResponse: func() interface{} { - return []interface{}{"slave-read-only", "no"} - }, - InjectError: func() error { return nil }, - }, - ), - }, - }, - }, - args: args{ - opts: ShardDiscoveryOptions{SaveConfigDiscoveryOpt, SlaveReadOnlyDiscoveryOpt}, - }, - want: map[string]saasv1alpha1.RedisServerDetails{ - "127.0.0.1:1000": { - Role: client.Master, - Config: map[string]string{"save": "900 1 300 10"}, - }, - "127.0.0.1:3000": { - Role: client.Slave, - Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, - }, - "127.0.0.1:4000": { - Role: client.Slave, - Config: map[string]string{"save": "900 1 300 10", 
"slave-read-only": "no"}, - }, - }, - wantErr: false, - }, - { - name: "Fails if master is down", - ss: &SentinelServer{ - Name: "tsentinel", - CRUD: crud.NewFakeCRUD( - redis.FakeResponse{ - // cmd: SentinelMaster - InjectResponse: func() interface{} { - return &redis_client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "o_down,master"} - }, - InjectError: func() error { return nil }, - }, - ), - MonitoredRedisServers: map[string]*RedisServer{}, - }, - args: args{ - opts: ShardDiscoveryOptions{SaveConfigDiscoveryOpt, SlaveReadOnlyDiscoveryOpt}, - }, - want: nil, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.ss.DiscoverShard(tt.args.ctx, tt.args.shard, tt.args.maxInfoCacheAge, tt.args.opts) - if (err != nil) != tt.wantErr { - t.Errorf("SentinelServer.DiscoverShard() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("SentinelServer.DiscoverShard() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestShardDiscoveryOptions_Has(t *testing.T) { - type args struct { - sdo ShardDiscoveryOption - } - tests := []struct { - name string - sdos ShardDiscoveryOptions - args args - want bool - }{ - { - name: "Returns true if option in slice", - sdos: []ShardDiscoveryOption{SaveConfigDiscoveryOpt, SlaveReadOnlyDiscoveryOpt}, - args: args{SlaveReadOnlyDiscoveryOpt}, - want: true, - }, - { - name: "Returns false if option not in slice", - sdos: []ShardDiscoveryOption{SaveConfigDiscoveryOpt, SlaveReadOnlyDiscoveryOpt}, - args: args{OnlyMasterDiscoveryOpt}, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tt.sdos.Has(tt.args.sdo); got != tt.want { - t.Errorf("ShardDiscoveryOptions.Has() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/redis/server/fake.go b/pkg/redis/server/fake.go new file mode 100644 index 00000000..b6219b79 --- /dev/null +++ b/pkg/redis/server/fake.go @@ -0,0 +1,14 @@ +package server + +import "github.com/3scale/saas-operator/pkg/redis/client" + +// NewFakeServerWithFakeClient returns a fake server with a fake client that will return the +// provided responses when called. This is only intended for testing. +func NewFakeServerWithFakeClient(host, port string, responses ...client.FakeResponse) *Server { + rsp := []client.FakeResponse{} + return &Server{ + host: host, + port: port, + client: &client.FakeClient{Responses: append(rsp, responses...)}, + } +} diff --git a/pkg/redis/server/pool.go b/pkg/redis/server/pool.go new file mode 100644 index 00000000..d4524a3b --- /dev/null +++ b/pkg/redis/server/pool.go @@ -0,0 +1,72 @@ +package server + +import ( + "net" + "sort" + "sync" + + "github.com/go-redis/redis/v8" +) + +// ServerPool holds a thread safe list of Servers. +// It is intended for client reuse throughout the code. 
+type ServerPool struct { + servers []*Server + mu sync.Mutex +} + +func NewServerPool(servers ...*Server) *ServerPool { + if len(servers) > 0 { + return &ServerPool{ + servers: servers, + } + } + + return &ServerPool{ + servers: []*Server{}, + } +} + +func (pool *ServerPool) GetServer(connectionString string, alias *string) (*Server, error) { + var srv *Server + var err error + + // make sure both reads and writes are consistent + // might cause some contention but grants consistency + pool.mu.Lock() + defer pool.mu.Unlock() + + opts, err := redis.ParseURL(connectionString) + if err != nil { + return nil, err + } + if srv = pool.indexByHostPort()[opts.Addr]; srv != nil { + // set the alias if it has been passed down + if alias != nil && srv.GetAlias() != *alias { + srv.SetAlias(*alias) + } + return srv, nil + } + + // If a Server was not found, create a new one and return it + if srv, err = NewServer(connectionString, alias); err != nil { + return nil, err + } + pool.servers = append(pool.servers, srv) + + // sort the slice to obtain consistent results + sort.Slice(pool.servers, func(i, j int) bool { + return pool.servers[i].ID() < pool.servers[j].ID() + }) + + return srv, nil +} + +func (pool *ServerPool) indexByHostPort() map[string]*Server { + index := make(map[string]*Server, len(pool.servers)) + for _, srv := range pool.servers { + index[net.JoinHostPort(srv.host, srv.port)] = srv + } + + return index +} diff --git a/pkg/redis/server/pool_test.go b/pkg/redis/server/pool_test.go new file mode 100644 index 00000000..f68691c7 --- /dev/null +++ b/pkg/redis/server/pool_test.go @@ -0,0 +1,124 @@ +package server + +import ( + "reflect" + "testing" + + "github.com/3scale/saas-operator/pkg/util" +) + +func TestServerPool_GetServer(t *testing.T) { + type fields struct { + servers []*Server + } + type args struct { + connectionString string + alias *string + } + tests := []struct { + name string + fields fields + args args + want *Server + wantErr bool + }{ + { + name: "Gets the server by hostport", + fields: fields{ + servers: []*Server{ + {alias: "host1", client: nil, host: "127.0.0.1", port: "1000"}, + {alias: "host2", client: nil, host: "127.0.0.2", port: "2000"}, + }}, + args: args{ + connectionString: "redis://127.0.0.2:2000", + }, + want: &Server{alias: "host2", client: nil, host: "127.0.0.2", port: "2000"}, + wantErr: false, + }, + { + name: "Gets the server by hostport and sets the alias", + fields: fields{ + servers: []*Server{ + {alias: "host1", client: nil, host: "127.0.0.1", port: "1000"}, + {alias: "", client: nil, host: "127.0.0.2", port: "2000"}, + }}, + args: args{ + connectionString: "redis://127.0.0.2:2000", + alias: util.Pointer("host2"), + }, + want: &Server{alias: "host2", client: nil, host: "127.0.0.2", port: "2000"}, + wantErr: false, + }, + { + name: "Returns error", + fields: fields{ + servers: []*Server{}}, + args: args{ + connectionString: "host", + }, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pool := &ServerPool{ + servers: tt.fields.servers, + } + got, err := pool.GetServer(tt.args.connectionString, tt.args.alias) + if (err != nil) != tt.wantErr { + t.Errorf("ServerPool.GetServer() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ServerPool.GetServer() = %v, want %v", got, tt.want) + } + }) + } + + t.Run("Adds a new server to the pool", func(t *testing.T) { + pool := &ServerPool{ + servers: []*Server{{alias: "host1", client: nil, port: 
"1000"}}, + } + new, _ := pool.GetServer("redis://127.0.0.2:2000", util.Pointer("host2")) + exists, _ := pool.GetServer("redis://127.0.0.2:2000", util.Pointer("host2")) + if new != exists { + t.Errorf("ServerPool.GetServer() = %v, want %v", new, exists) + } + }) +} + +func TestServerPool_indexByHost(t *testing.T) { + type fields struct { + servers []*Server + } + tests := []struct { + name string + fields fields + want map[string]*Server + }{ + { + name: "Returns a map indexed by host", + fields: fields{ + servers: []*Server{ + {alias: "host1", client: nil, host: "127.0.0.1", port: "1000"}, + {alias: "host2", client: nil, host: "127.0.0.2", port: "2000"}, + }, + }, + want: map[string]*Server{ + "127.0.0.1:1000": {alias: "host1", client: nil, host: "127.0.0.1", port: "1000"}, + "127.0.0.2:2000": {alias: "host2", client: nil, host: "127.0.0.2", port: "2000"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pool := &ServerPool{ + servers: tt.fields.servers, + } + if got := pool.indexByHostPort(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ServerPool.indexByHost() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/redis/server/server.go b/pkg/redis/server/server.go new file mode 100644 index 00000000..09d44b00 --- /dev/null +++ b/pkg/redis/server/server.go @@ -0,0 +1,268 @@ +package server + +import ( + "bufio" + "context" + "net" + "strings" + "sync" + "time" + + "github.com/3scale/saas-operator/pkg/redis/client" + "github.com/go-redis/redis/v8" +) + +// Server is a host that talks the redis protocol +// Contains methods to use a subset of redis commands +type Server struct { + alias string + client client.TestableInterface + host string + port string + mu sync.Mutex +} + +// NewServer returns a new client for this redis server from the given connection +// string. It can optionally be passed an alias to identify the server. 
+func NewServer(connectionString string, alias *string) (*Server, error) { + + opt, err := redis.ParseURL(connectionString) + if err != nil { + return nil, err + } + + host, port, err := net.SplitHostPort(opt.Addr) + if err != nil { + return nil, err + } + + srv := &Server{ + host: host, + port: port, + client: client.NewFromOptions(opt), + } + + if alias != nil { + srv.SetAlias(*alias) + } + + return srv, nil +} + +func MustNewServer(connectionString string, alias *string) *Server { + srv, err := NewServer(connectionString, alias) + if err != nil { + panic(err) + } + return srv +} + +func NewServerFromParams(alias, host, port string, c client.TestableInterface) *Server { + return &Server{ + alias: alias, + host: host, + port: port, + client: c, + } +} + +func (srv *Server) CloseClient() error { + return srv.client.Close() +} + +func (srv *Server) GetClient() client.TestableInterface { + return srv.client +} + +func (srv *Server) GetHost() string { + return srv.host +} + +func (srv *Server) GetPort() string { + return srv.port +} + +func (srv *Server) GetAlias() string { + if srv.alias != "" { + return srv.alias + } + return srv.ID() +} + +func (srv *Server) SetAlias(alias string) { + srv.mu.Lock() + srv.alias = alias + srv.mu.Unlock() +} + +// ID returns the ID of the server, which takes the form "host:port" +func (srv *Server) ID() string { + return net.JoinHostPort(srv.host, srv.port) +} + +func (srv *Server) SentinelMaster(ctx context.Context, shard string) (*client.SentinelMasterCmdResult, error) { + + result, err := srv.client.SentinelMaster(ctx, shard) + if err != nil { + return nil, err + } + return result, nil +} + +func (srv *Server) SentinelMasters(ctx context.Context) ([]client.SentinelMasterCmdResult, error) { + + values, err := srv.client.SentinelMasters(ctx) + if err != nil { + return nil, err + } + + result := make([]client.SentinelMasterCmdResult, len(values)) + for i, val := range values { + masterResult := &client.SentinelMasterCmdResult{} + err := sliceCmdToStruct(val, masterResult) + if err != nil { + return nil, err + } + result[i] = *masterResult + } + + return result, nil +} + +func (srv *Server) SentinelSlaves(ctx context.Context, shard string) ([]client.SentinelSlaveCmdResult, error) { + + values, err := srv.client.SentinelSlaves(ctx, shard) + if err != nil { + return nil, err + } + + result := make([]client.SentinelSlaveCmdResult, len(values)) + for i, val := range values { + slaveResult := &client.SentinelSlaveCmdResult{} + err := sliceCmdToStruct(val, slaveResult) + if err != nil { + return nil, err + } + result[i] = *slaveResult + } + + return result, nil +} + +func (srv *Server) SentinelMonitor(ctx context.Context, name, host string, port string, quorum int) error { + return srv.client.SentinelMonitor(ctx, name, host, port, quorum) +} + +func (srv *Server) SentinelSet(ctx context.Context, shard, parameter, value string) error { + return srv.client.SentinelSet(ctx, shard, parameter, value) +} + +func (srv *Server) SentinelPSubscribe(ctx context.Context, events ...string) (<-chan *redis.Message, func() error) { + return srv.client.SentinelPSubscribe(ctx, events...) 
+} + +func (srv *Server) SentinelInfoCache(ctx context.Context) (client.SentinelInfoCache, error) { + result := client.SentinelInfoCache{} + + raw, err := srv.client.SentinelInfoCache(ctx) + mval := islice2imap(raw) + + for shard, servers := range mval { + result[shard] = make(map[string]client.RedisServerInfoCache, len(servers.([]interface{}))) + + for _, server := range servers.([]interface{}) { + // When sentinel is unable to reach the redis slave the info field can be nil + // so we have to check this to avoid panics + if server.([]interface{})[1] != nil { + info := InfoStringToMap(server.([]interface{})[1].(string)) + result[shard][info["run_id"]] = client.RedisServerInfoCache{ + CacheAge: time.Duration(server.([]interface{})[0].(int64)) * time.Millisecond, + Info: info, + } + } + } + } + + return result, err +} + +func (srv *Server) SentinelPing(ctx context.Context) error { + return srv.client.SentinelPing(ctx) +} + +func (srv *Server) RedisRole(ctx context.Context) (client.Role, string, error) { + val, err := srv.client.RedisRole(ctx) + if err != nil { + return client.Unknown, "", err + } + + if client.Role(val.([]interface{})[0].(string)) == client.Master { + return client.Master, "", nil + } else { + return client.Slave, val.([]interface{})[1].(string), nil + } +} + +func (srv *Server) RedisConfigGet(ctx context.Context, parameter string) (string, error) { + val, err := srv.client.RedisConfigGet(ctx, parameter) + if err != nil { + return "", err + } + return val[1].(string), nil +} + +func (srv *Server) RedisConfigSet(ctx context.Context, parameter, value string) error { + return srv.client.RedisConfigSet(ctx, parameter, value) +} + +func (srv *Server) RedisSlaveOf(ctx context.Context, host, port string) error { + return srv.client.RedisSlaveOf(ctx, host, port) +} + +func (srv *Server) RedisDebugSleep(ctx context.Context, duration time.Duration) error { + return srv.client.RedisDebugSleep(ctx, duration) +} + +// This is a horrible function to parse the horrible structs that the go-redis +// client returns for administrative commands. I swear it's not my fault ... 
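+//
+// The reply is a flat, alternating key/value list. For illustration, an
+// input such as
+//
+//	[]interface{}{"name", "shard00", "ip", "127.0.0.1", "port", "6379"}
+//
+// is first folded into map[string]string{"name": "shard00", "ip": "127.0.0.1",
+// "port": "6379"} and then scanned into the out struct through
+// redis.NewStringStringMapResult(m, nil).Scan(out).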
+func sliceCmdToStruct(in interface{}, out interface{}) error { + m := map[string]string{} + for i := range in.([]interface{}) { + if i%2 != 0 { + continue + } + m[in.([]interface{})[i].(string)] = in.([]interface{})[i+1].(string) + } + + err := redis.NewStringStringMapResult(m, nil).Scan(out) + if err != nil { + return err + } + return nil +} + +func islice2imap(in interface{}) map[string]interface{} { + m := map[string]interface{}{} + for i := range in.([]interface{}) { + if i%2 != 0 { + continue + } + m[in.([]interface{})[i].(string)] = in.([]interface{})[i+1].([]interface{}) + } + return m +} + +func InfoStringToMap(in string) map[string]string { + + m := map[string]string{} + scanner := bufio.NewScanner(strings.NewReader(in)) + for scanner.Scan() { + // do not add empty lines or section headings (see the test for more info) + if line := scanner.Text(); line != "" && !strings.HasPrefix(line, "# ") { + kv := strings.SplitN(line, ":", 2) + m[kv[0]] = kv[1] + } + } + + return m +} diff --git a/pkg/redis/crud/crud_test.go b/pkg/redis/server/server_test.go similarity index 94% rename from pkg/redis/crud/crud_test.go rename to pkg/redis/server/server_test.go index 120b4085..274339a4 100644 --- a/pkg/redis/crud/crud_test.go +++ b/pkg/redis/server/server_test.go @@ -1,4 +1,4 @@ -package crud +package server import ( "context" @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/3scale/saas-operator/pkg/redis/crud/client" + "github.com/3scale/saas-operator/pkg/redis/client" "github.com/go-redis/redis/v8" "github.com/go-test/deep" ) @@ -16,25 +16,28 @@ func init() { deep.CompareUnexportedFields = true } -func TestNewRedisCRUD(t *testing.T) { +func TestNewGoRedisClientFromConnectionString(t *testing.T) { type args struct { connectionString string } tests := []struct { name string args args - want *CRUD + want *Server wantErr bool }{ { - name: "Returns a CRUD object", + name: "Returns a client object", args: args{ connectionString: "redis://127.0.0.1:1234", }, - want: &CRUD{ - Client: func() Client { c, _ := client.NewFromConnectionString("redis://127.0.0.1:1234"); return c }(), - IP: "127.0.0.1", - Port: "1234", + want: &Server{ + client: func() client.TestableInterface { + c, _ := client.NewFromConnectionString("redis://127.0.0.1:1234") + return c + }(), + host: "127.0.0.1", + port: "1234", }, wantErr: false, }, @@ -49,7 +52,7 @@ func TestNewRedisCRUD(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := NewRedisCRUDFromConnectionString(tt.args.connectionString) + got, err := NewServer(tt.args.connectionString, nil) if (err != nil) != tt.wantErr { t.Errorf("NewRedisCRUD() error = %v, wantErr %v", err, tt.wantErr) return @@ -61,10 +64,10 @@ func TestNewRedisCRUD(t *testing.T) { } } -func TestClient_GetIP(t *testing.T) { +func TestClient_GetHost(t *testing.T) { type fields struct { - client Client - ip string + client client.TestableInterface + host string port string } tests := []struct { @@ -73,10 +76,10 @@ func TestClient_GetIP(t *testing.T) { want string }{ { - name: "Returns the server IP", + name: "Returns the server host", fields: fields{ client: nil, - ip: "127.0.0.1", + host: "127.0.0.1", port: "2222", }, want: "127.0.0.1", @@ -84,13 +87,13 @@ func TestClient_GetIP(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sc := &CRUD{ - Client: tt.fields.client, - IP: tt.fields.ip, - Port: tt.fields.port, + sc := &Server{ + client: tt.fields.client, + port: tt.fields.port, + host: tt.fields.host, } - if got := 
sc.GetIP(); got != tt.want { - t.Errorf("Client.GetIP() = %v, want %v", got, tt.want) + if got := sc.GetHost(); got != tt.want { + t.Errorf("Client.GetHost() = %v, want %v", got, tt.want) } }) } @@ -98,7 +101,8 @@ func TestClient_GetIP(t *testing.T) { func TestClient_GetPort(t *testing.T) { type fields struct { - client Client + client client.TestableInterface + host string ip string port string } @@ -111,6 +115,7 @@ func TestClient_GetPort(t *testing.T) { name: "Returns the server port", fields: fields{ client: nil, + host: "127.0.0.1", ip: "127.0.0.1", port: "2222", }, @@ -119,10 +124,9 @@ func TestClient_GetPort(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sc := &CRUD{ - Client: tt.fields.client, - IP: tt.fields.ip, - Port: tt.fields.port, + sc := &Server{ + client: tt.fields.client, + port: tt.fields.port, } if got := sc.GetPort(); got != tt.want { t.Errorf("Client.GetPort() = %v, want %v", got, tt.want) @@ -133,7 +137,8 @@ func TestClient_GetPort(t *testing.T) { func TestClient_SentinelMaster(t *testing.T) { type fields struct { - client Client + client client.TestableInterface + host string ip string port string } @@ -217,6 +222,7 @@ func TestClient_SentinelMaster(t *testing.T) { InjectError: func() error { return errors.New("error") }, }}, }, + host: "127.0.0.1", ip: "abc", port: "abc", }, @@ -227,10 +233,9 @@ func TestClient_SentinelMaster(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sc := &CRUD{ - Client: tt.fields.client, - IP: tt.fields.ip, - Port: tt.fields.port, + sc := &Server{ + client: tt.fields.client, + port: tt.fields.port, } got, err := sc.SentinelMaster(tt.args.ctx, tt.args.shard) if (err != nil) != tt.wantErr { @@ -246,7 +251,7 @@ func TestClient_SentinelMaster(t *testing.T) { func TestClient_SentinelMasters(t *testing.T) { type fields struct { - client Client + client client.TestableInterface ip string port string } @@ -387,10 +392,9 @@ func TestClient_SentinelMasters(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sc := &CRUD{ - Client: tt.fields.client, - IP: tt.fields.ip, - Port: tt.fields.port, + sc := &Server{ + client: tt.fields.client, + port: tt.fields.port, } got, err := sc.SentinelMasters(tt.args.ctx) if (err != nil) != tt.wantErr { @@ -406,7 +410,7 @@ func TestClient_SentinelMasters(t *testing.T) { func TestClient_SentinelSlaves(t *testing.T) { type fields struct { - client Client + client client.TestableInterface ip string port string } @@ -547,10 +551,9 @@ func TestClient_SentinelSlaves(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sc := &CRUD{ - Client: tt.fields.client, - IP: tt.fields.ip, - Port: tt.fields.port, + sc := &Server{ + client: tt.fields.client, + port: tt.fields.port, } got, err := sc.SentinelSlaves(tt.args.ctx, tt.args.shard) if (err != nil) != tt.wantErr { @@ -566,7 +569,8 @@ func TestClient_SentinelSlaves(t *testing.T) { func TestClient_SentinelMonitor(t *testing.T) { type fields struct { - client Client + client client.TestableInterface + host string ip string port string } @@ -620,10 +624,10 @@ func TestClient_SentinelMonitor(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sc := &CRUD{ - Client: tt.fields.client, - IP: tt.fields.ip, - Port: tt.fields.port, + sc := &Server{ + client: tt.fields.client, + host: tt.fields.host, + port: tt.fields.port, } if err := sc.SentinelMonitor(tt.args.ctx, tt.args.name, tt.args.host, tt.args.port, tt.args.quorum); (err 
!= nil) != tt.wantErr { t.Errorf("Client.SentinelMonitor() error = %v, wantErr %v", err, tt.wantErr) @@ -634,7 +638,8 @@ func TestClient_SentinelMonitor(t *testing.T) { func TestClient_SentinelSet(t *testing.T) { type fields struct { - client Client + client client.TestableInterface + host string ip string port string } @@ -688,10 +693,10 @@ func TestClient_SentinelSet(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sc := &CRUD{ - Client: tt.fields.client, - IP: tt.fields.ip, - Port: tt.fields.port, + sc := &Server{ + client: tt.fields.client, + host: tt.fields.host, + port: tt.fields.port, } if err := sc.SentinelSet(tt.args.ctx, tt.args.shard, tt.args.parameter, tt.args.value); (err != nil) != tt.wantErr { t.Errorf("Client.SentinelSet() error = %v, wantErr %v", err, tt.wantErr) @@ -702,7 +707,8 @@ func TestClient_SentinelSet(t *testing.T) { func TestCRUD_SentinelPSubscribe(t *testing.T) { type fields struct { - client Client + client client.TestableInterface + host string ip string port string } @@ -747,10 +753,10 @@ func TestCRUD_SentinelPSubscribe(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - crud := &CRUD{ - Client: tt.fields.client, - IP: tt.fields.ip, - Port: tt.fields.port, + crud := &Server{ + client: tt.fields.client, + host: tt.fields.host, + port: tt.fields.port, } timeout := time.After(100 * time.Millisecond) done := make(chan bool) @@ -774,7 +780,7 @@ func TestCRUD_SentinelPSubscribe(t *testing.T) { func TestCRUD_SentinelInfoCache(t *testing.T) { type fields struct { - Client Client + Client client.TestableInterface IP string Port string } @@ -946,10 +952,9 @@ func TestCRUD_SentinelInfoCache(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - crud := &CRUD{ - Client: tt.fields.Client, - IP: tt.fields.IP, - Port: tt.fields.Port, + crud := &Server{ + client: tt.fields.Client, + port: tt.fields.Port, } got, err := crud.SentinelInfoCache(tt.args.ctx) if (err != nil) != tt.wantErr { @@ -965,7 +970,7 @@ func TestCRUD_SentinelInfoCache(t *testing.T) { func TestClient_RedisRole(t *testing.T) { type fields struct { - client Client + client client.TestableInterface ip string port string } @@ -1057,10 +1062,9 @@ func TestClient_RedisRole(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sc := &CRUD{ - Client: tt.fields.client, - IP: tt.fields.ip, - Port: tt.fields.port, + sc := &Server{ + client: tt.fields.client, + port: tt.fields.port, } got, got1, err := sc.RedisRole(tt.args.ctx) if (err != nil) != tt.wantErr { @@ -1079,7 +1083,7 @@ func TestClient_RedisRole(t *testing.T) { func TestClient_RedisConfigGet(t *testing.T) { type fields struct { - client Client + client client.TestableInterface ip string port string } @@ -1139,10 +1143,9 @@ func TestClient_RedisConfigGet(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sc := &CRUD{ - Client: tt.fields.client, - IP: tt.fields.ip, - Port: tt.fields.port, + sc := &Server{ + client: tt.fields.client, + port: tt.fields.port, } got, err := sc.RedisConfigGet(tt.args.ctx, tt.args.parameter) if (err != nil) != tt.wantErr { @@ -1158,7 +1161,7 @@ func TestClient_RedisConfigGet(t *testing.T) { func TestClient_RedisSlaveOf(t *testing.T) { type fields struct { - client Client + client client.TestableInterface ip string port string } @@ -1206,10 +1209,9 @@ func TestClient_RedisSlaveOf(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sc := &CRUD{ - 
Client: tt.fields.client, - IP: tt.fields.ip, - Port: tt.fields.port, + sc := &Server{ + client: tt.fields.client, + port: tt.fields.port, } if err := sc.RedisSlaveOf(tt.args.ctx, tt.args.host, tt.args.port); (err != nil) != tt.wantErr { t.Errorf("Client.RedisSlaveOf() error = %v, wantErr %v", err, tt.wantErr) @@ -1407,7 +1409,7 @@ func Test_infoStringToMap(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := infoStringToMap(tt.args.in); !reflect.DeepEqual(got, tt.want) { + if got := InfoStringToMap(tt.args.in); !reflect.DeepEqual(got, tt.want) { t.Errorf("infoStringToMap() = %v, want %v", got, tt.want) } }) diff --git a/pkg/redis/sharded/discover.go b/pkg/redis/sharded/discover.go new file mode 100644 index 00000000..b6ff5814 --- /dev/null +++ b/pkg/redis/sharded/discover.go @@ -0,0 +1,74 @@ +package sharded + +import ( + "context" + "fmt" + + "github.com/3scale/saas-operator/pkg/redis/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type DiscoveryOptionSet []DiscoveryOption + +type DiscoveryOption int + +const ( + SlaveReadOnlyDiscoveryOpt DiscoveryOption = iota + SaveConfigDiscoveryOpt + OnlyMasterDiscoveryOpt +) + +func (set DiscoveryOptionSet) Has(opt DiscoveryOption) bool { + for _, o := range set { + if opt == o { + return true + } + } + return false +} + +// Discover returns the characteristics for a given +// redis Server +// It always gets the role first +func (srv *RedisServer) Discover(ctx context.Context, opts ...DiscoveryOption) error { + logger := log.FromContext(ctx, "function", "(*RedisServer).Discover()") + + role, _, err := srv.RedisRole(ctx) + if err != nil { + srv.Role = client.Unknown + logger.Error(err, fmt.Sprintf("unable to get %s|%s|%s role", srv.GetAlias(), srv.Role, srv.ID())) + return err + } + srv.Role = role + + if srv.Config == nil { + srv.Config = map[string]string{} + } + + if DiscoveryOptionSet(opts).Has(SaveConfigDiscoveryOpt) { + + save, err := srv.RedisConfigGet(ctx, "save") + if err != nil { + logger.Error(err, fmt.Sprintf("unable to get %s|%s|%s 'save' option", srv.GetAlias(), srv.Role, srv.ID())) + return err + } + srv.Config["save"] = save + } + + if DiscoveryOptionSet(opts).Has(SlaveReadOnlyDiscoveryOpt) && role != client.Master { + slaveReadOnly, err := srv.RedisConfigGet(ctx, "slave-read-only") + if err != nil { + logger.Error(err, fmt.Sprintf("unable to get %s|%s|%s 'slave-read-only' option", srv.GetAlias(), srv.Role, srv.ID())) + return err + } + srv.Config["slave-read-only"] = slaveReadOnly + } + + return nil +} + +// Discovery errors +type DiscoveryError_Sentinel_Failure struct{ error } +type DiscoveryError_Master_SingleServerFailure struct{ error } +type DiscoveryError_Slave_SingleServerFailure struct{ error } +type DiscoveryError_UnknownRole_SingleServerFailure struct{ error } diff --git a/pkg/redis/sharded/discover_test.go b/pkg/redis/sharded/discover_test.go new file mode 100644 index 00000000..2381d574 --- /dev/null +++ b/pkg/redis/sharded/discover_test.go @@ -0,0 +1,185 @@ +package sharded + +import ( + "context" + "errors" + "testing" + + "github.com/3scale/saas-operator/pkg/redis/client" + redis "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/go-test/deep" +) + +func TestDiscoveryOptionSet_Has(t *testing.T) { + type args struct { + opt DiscoveryOption + } + tests := []struct { + name string + set DiscoveryOptionSet + args args + want bool + }{ + { + name: "Returns true if option in slice", + set: DiscoveryOptionSet{SaveConfigDiscoveryOpt, SlaveReadOnlyDiscoveryOpt}, +
args: args{opt: SlaveReadOnlyDiscoveryOpt}, + want: true, + }, + { + name: "Returns false if option not in slice", + set: DiscoveryOptionSet{SlaveReadOnlyDiscoveryOpt}, + args: args{opt: SaveConfigDiscoveryOpt}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.set.Has(tt.args.opt); got != tt.want { + t.Errorf("DiscoveryOptions.Has() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestRedisServer_Discover(t *testing.T) { + type fields struct { + Server *redis.Server + Role client.Role + Config map[string]string + } + type args struct { + ctx context.Context + opts []DiscoveryOption + } + tests := []struct { + name string + fields fields + args args + wantRole client.Role + wantConfig map[string]string + wantErr bool + }{ + { + name: "Discovers a master", + fields: fields{ + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.FakeResponse{ + InjectResponse: func() interface{} { + return []interface{}{"master", ""} + }, + InjectError: func() error { return nil }, + }, + )}, + args: args{ctx: context.TODO(), opts: DiscoveryOptionSet{}}, + wantRole: client.Master, + wantConfig: map[string]string{}, + wantErr: false, + }, + { + name: "Discovers a master with save config", + fields: fields{ + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.FakeResponse{ + InjectResponse: func() interface{} { + return []interface{}{"master", ""} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: RedisConfigGet("save") + InjectResponse: func() interface{} { + return []interface{}{"save", "900 1 300 10"} + }, + InjectError: func() error { return nil }, + }, + ), + }, + args: args{ctx: context.TODO(), opts: DiscoveryOptionSet{SaveConfigDiscoveryOpt}}, + wantRole: client.Master, + wantConfig: map[string]string{"save": "900 1 300 10"}, + wantErr: false, + }, + { + name: "Discovers a ro slave", + fields: fields{ + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.FakeResponse{ + InjectResponse: func() interface{} { + return []interface{}{"slave", "127.0.0.1:3333"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + InjectResponse: func() interface{} { + return []interface{}{"read-only", "yes"} + }, + InjectError: func() error { return nil }, + }, + ), + }, + args: args{ctx: context.TODO(), opts: DiscoveryOptionSet{SlaveReadOnlyDiscoveryOpt}}, + wantRole: client.Slave, + wantConfig: map[string]string{"slave-read-only": "yes"}, + wantErr: false, + }, + { + name: "Discovers a rw slave", + fields: fields{ + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.FakeResponse{ + InjectResponse: func() interface{} { + return []interface{}{"slave", "127.0.0.1:3333"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + InjectResponse: func() interface{} { + return []interface{}{"read-only", "no"} + }, + InjectError: func() error { return nil }, + }, + ), + }, + args: args{ctx: context.TODO(), opts: DiscoveryOptionSet{SlaveReadOnlyDiscoveryOpt}}, + wantRole: client.Slave, + wantConfig: map[string]string{"slave-read-only": "no"}, + wantErr: false, + }, + { + name: "'role' command fails, returns an error", + fields: fields{ + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.FakeResponse{ + InjectResponse: func() interface{} { return []interface{}{} }, + InjectError: func() error { return errors.New("error") }, + }, + ), + }, + args: args{ctx: context.TODO(), opts: 
DiscoveryOptionSet{}}, + wantRole: client.Unknown, + wantConfig: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := &RedisServer{ + Server: tt.fields.Server, + Role: tt.fields.Role, + Config: tt.fields.Config, + } + + if err := srv.Discover(tt.args.ctx, tt.args.opts...); (err != nil) != tt.wantErr { + t.Errorf("RedisServer.Discover() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.wantRole != srv.Role { + t.Errorf("RedisServer.Discover() got = %v, want %v", srv.Role, tt.wantRole) + } + if diff := deep.Equal(srv.Config, tt.wantConfig); len(diff) > 0 { + t.Errorf("RedisServer.Discover() got diff: %v", diff) + } + }) + } +} diff --git a/pkg/redis/sharded/redis_server.go b/pkg/redis/sharded/redis_server.go new file mode 100644 index 00000000..52389677 --- /dev/null +++ b/pkg/redis/sharded/redis_server.go @@ -0,0 +1,116 @@ +package sharded + +import ( + "context" + "fmt" + + "github.com/3scale/saas-operator/pkg/redis/client" + redis "github.com/3scale/saas-operator/pkg/redis/server" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type RedisServer struct { + *redis.Server + Role client.Role + Config map[string]string +} + +func NewRedisServerFromPool(connectionString string, alias *string, pool *redis.ServerPool) (*RedisServer, error) { + srv, err := pool.GetServer(connectionString, alias) + if err != nil { + return nil, err + } + + return &RedisServer{ + Server: srv, + Role: client.Unknown, + Config: map[string]string{}, + }, nil +} + +func NewRedisServerFromParams(srv *redis.Server, role client.Role, config map[string]string) *RedisServer { + return &RedisServer{ + Server: srv, + Role: role, + Config: config, + } +} + +func (srv *RedisServer) InitMaster(ctx context.Context) (bool, error) { + logger := log.FromContext(ctx, "function", "(*RedisServer).InitMaster") + + role, slaveof, err := srv.RedisRole(ctx) + if err != nil { + return false, err + } + + switch role { + case client.Slave: + + if slaveof == "127.0.0.1" { + // needs initialization + if err := srv.RedisSlaveOf(ctx, "NO", "ONE"); err != nil { + return false, err + } + logger.Info(fmt.Sprintf("configured %s|%s as master", srv.GetAlias(), srv.ID())) + return true, nil + + } else { + srv.Role = client.Slave + } + + case client.Master: + srv.Role = client.Master + } + + return false, nil +} + +func (srv *RedisServer) InitSlave(ctx context.Context, master *RedisServer) (bool, error) { + + logger := log.FromContext(ctx, "function", "(*RedisServer).InitSlave") + + role, slaveof, err := srv.RedisRole(ctx) + if err != nil { + return false, err + } + + switch role { + case client.Slave: + + // needs initialization + if slaveof == "127.0.0.1" { + // validate first that the master is ready + role, _, err := master.RedisRole(ctx) + if err != nil || role != client.Master { + err := fmt.Errorf("shard master %s|%s is not ready", master.GetAlias(), master.ID()) + logger.Error(err, "slave init failed") + return false, err + + } else { + // if master ok, init slave + if err := srv.RedisSlaveOf(ctx, master.GetHost(), master.GetPort()); err != nil { + return false, err + } + logger.Info(fmt.Sprintf("configured %s|%s as slave", srv.GetAlias(), srv.ID())) + return true, nil + } + + } else { + srv.Role = client.Slave + // FOR DEBUGGING + // val, err := srv.GetClient().RedisDo(ctx, "info", "replication") + // if err != nil { + // logger.Error(err, "unable to get info") + // } else { + // logger.Info("dump replication status", "Slave", srv.GetAlias()) + // logger.Info(fmt.Sprintf("%s", 
redis.InfoStringToMap(val.(string)))) + // } + } + + case client.Master: + srv.Role = client.Master + } + + return false, nil +} diff --git a/pkg/redis/sharded/redis_server_test.go b/pkg/redis/sharded/redis_server_test.go new file mode 100644 index 00000000..36f6ece4 --- /dev/null +++ b/pkg/redis/sharded/redis_server_test.go @@ -0,0 +1,55 @@ +package sharded + +import ( + "testing" + + "github.com/3scale/saas-operator/pkg/redis/client" + redis "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/3scale/saas-operator/pkg/util" + "github.com/go-test/deep" +) + +func init() { + deep.CompareUnexportedFields = true +} + +func TestNewRedisServerFromPool(t *testing.T) { + type args struct { + connectionString string + alias *string + pool *redis.ServerPool + } + tests := []struct { + name string + args args + want *RedisServer + wantErr bool + }{ + { + name: "Returns a RedisServer", + args: args{ + connectionString: "redis://127.0.0.1:1000", + alias: util.Pointer("host1"), + pool: redis.NewServerPool(redis.NewServerFromParams("host1", "127.0.0.1", "1000", client.MustNewFromConnectionString("redis://127.0.0.1:1000"))), + }, + want: &RedisServer{ + Server: redis.NewServerFromParams("host1", "127.0.0.1", "1000", client.MustNewFromConnectionString("redis://127.0.0.1:1000")), + Role: client.Unknown, + Config: map[string]string{}, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewRedisServerFromPool(tt.args.connectionString, tt.args.alias, tt.args.pool) + if (err != nil) != tt.wantErr { + t.Errorf("NewRedisServerFromPool() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := deep.Equal(got, tt.want); len(diff) > 0 { + t.Errorf("NewRedisServerFromPool() got diff: %v", diff) + } + }) + } +} diff --git a/pkg/redis/sharded/redis_shard.go b/pkg/redis/sharded/redis_shard.go new file mode 100644 index 00000000..68a952d1 --- /dev/null +++ b/pkg/redis/sharded/redis_shard.go @@ -0,0 +1,251 @@ +package sharded + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/3scale/saas-operator/pkg/redis/client" + redis "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/3scale/saas-operator/pkg/util" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// Shard is a list of the redis Server objects that compose a redis shard +type Shard struct { + Name string + Servers []*RedisServer + pool *redis.ServerPool +} + +// NewShard returns a Shard object given the passed redis server URLs +func NewShard(name string, servers map[string]string, pool *redis.ServerPool) (*Shard, error) { + var merr util.MultiError + shard := &Shard{Name: name, pool: pool} + shard.Servers = make([]*RedisServer, 0, len(servers)) + + for key, connectionString := range servers { + var alias *string = nil + if key != connectionString { + alias = &key + } + srv, err := NewRedisServerFromPool(connectionString, alias, pool) + if err != nil { + merr = append(merr, err) + continue + } + shard.Servers = append(shard.Servers, srv) + } + + // sort the slice to obtain consistent results + sort.Slice(shard.Servers, func(i, j int) bool { + return shard.Servers[i].ID() < shard.Servers[j].ID() + }) + + return shard, merr.ErrorOrNil() +} + +// Discover retrieves the options for all the servers in the shard +// If a SentinelServer is provided, it will be used to autodiscover servers and roles in the shard +func (shard *Shard) Discover(ctx context.Context, sentinel *SentinelServer, options ...DiscoveryOption) error { + var merr util.MultiError + logger := 
log.FromContext(ctx, "function", "(*Shard).Discover", "shard", shard.Name) + + switch sentinel { + + // no sentinel provided + case nil: + for idx := range shard.Servers { + if err := shard.Servers[idx].Discover(ctx, options...); err != nil { + logger.Error(err, fmt.Sprintf("unable to discover redis server %s", shard.Servers[idx].ID())) + merr = append(merr, DiscoveryError_UnknownRole_SingleServerFailure{err}) + continue + } + } + + // sentinel provided + default: + sentinelMasterResult, err := sentinel.SentinelMaster(ctx, shard.Name) + if err != nil { + return append(merr, DiscoveryError_Sentinel_Failure{err}) + } + + // Get the corresponding server or add a new one if not found + srv, err := shard.GetServerByID(fmt.Sprintf("%s:%d", sentinelMasterResult.IP, sentinelMasterResult.Port)) + if err != nil { + return append(merr, DiscoveryError_Master_SingleServerFailure{err}) + } + + // do not try to discover a master flagged as "s_down" or "o_down" + if strings.Contains(sentinelMasterResult.Flags, "s_down") || strings.Contains(sentinelMasterResult.Flags, "o_down") { + err := fmt.Errorf("master %s is s_down/o_down", srv.GetAlias()) + logger.Error(err, "master down") + return append(merr, DiscoveryError_Master_SingleServerFailure{err}) + } + + // Confirm the server role + if err = srv.Discover(ctx, options...); err != nil { + srv.Role = client.Unknown + return append(merr, DiscoveryError_Master_SingleServerFailure{err}) + } else if srv.Role != client.Master { + // the role that the server reports is different from the role that + // sentinel sees. Probably the sentinel configuration hasn't converged yet + // this is an error and should be retried + srv.Role = client.Unknown + return append(merr, DiscoveryError_Master_SingleServerFailure{fmt.Errorf("sentinel config has not yet converged for %s", srv.GetAlias())}) + } + + if DiscoveryOptionSet(options).Has(OnlyMasterDiscoveryOpt) { + return merr.ErrorOrNil() + } + + // discover slaves + sentinelSlavesResult, err := sentinel.SentinelSlaves(ctx, shard.Name) + if err != nil { + return append(merr, DiscoveryError_Sentinel_Failure{err}) + } + for _, slave := range sentinelSlavesResult { + + // Get the corresponding server or add a new one if not found + srv, err := shard.GetServerByID(fmt.Sprintf("%s:%d", slave.IP, slave.Port)) + if err != nil { + merr = append(merr, DiscoveryError_Slave_SingleServerFailure{err}) + continue + } + + // do not try to discover a slave flagged as "s_down" or "o_down" + if strings.Contains(slave.Flags, "s_down") || strings.Contains(slave.Flags, "o_down") { + err := fmt.Errorf("slave %s is s_down/o_down", srv.GetAlias()) + logger.Error(err, "slave is down") + merr = append(merr, DiscoveryError_Slave_SingleServerFailure{err}) + continue + + } else { + if err := srv.Discover(ctx, options...); err != nil { + srv.Role = client.Unknown + logger.Error(err, fmt.Sprintf("unable to discover redis server %s", srv.GetAlias())) + merr = append(merr, DiscoveryError_Slave_SingleServerFailure{err}) + continue + } + if srv.Role != client.Slave { + // the role that the server reports is different from the role that + // sentinel sees. 
Probably the sentinel configuration hasn't converged yet + // this is an error and should be retried + srv.Role = client.Unknown + merr = append(merr, DiscoveryError_Slave_SingleServerFailure{fmt.Errorf("sentinel config has not yet converged for %s", srv.GetAlias())}) + continue + } + } + } + } + + return merr.ErrorOrNil() +} + +// GetMaster returns the master server of a shard, +// or an error if zero or more than one master is found +func (shard *Shard) GetMaster() (*RedisServer, error) { + master := []*RedisServer{} + + for _, srv := range shard.Servers { + if srv.Role == client.Master { + master = append(master, srv) + } + } + + if len(master) != 1 { + return nil, util.WrapError("(*Shard).GetMaster", fmt.Errorf("wrong number of masters: %d != 1", len(master))) + } + + return master[0], nil +} + +func (shard *Shard) GetSlavesRW() []*RedisServer { + servers := []*RedisServer{} + for _, srv := range shard.Servers { + if srv.Role == client.Slave { + if val, ok := srv.Config["slave-read-only"]; ok && val == "no" { + servers = append(servers, srv) + } + } + } + sort.Slice(servers, func(i, j int) bool { + return servers[i].ID() < servers[j].ID() + }) + return servers +} + +func (shard *Shard) GetSlavesRO() []*RedisServer { + servers := []*RedisServer{} + for _, srv := range shard.Servers { + if srv.Role == client.Slave { + if val, ok := srv.Config["slave-read-only"]; ok && val == "yes" { + servers = append(servers, srv) + } + } + } + sort.Slice(servers, func(i, j int) bool { + return servers[i].ID() < servers[j].ID() + }) + return servers +} + +func (shard *Shard) GetServerByID(hostport string) (*RedisServer, error) { + var rs *RedisServer + var err error + + for _, srv := range shard.Servers { + if srv.ID() == hostport { + rs = srv + break + } + } + + // If the server is not in the list, add a new one + if rs == nil { + rs, err = NewRedisServerFromPool("redis://"+hostport, nil, shard.pool) + if err != nil { + return nil, err + } + shard.Servers = append(shard.Servers, rs) + } + + return rs, nil +} + +// Init initializes the shard if not already initialized +func (shard *Shard) Init(ctx context.Context, masterHostPort string) ([]string, error) { + merr := util.MultiError{} + listChanged := []string{} + var master *RedisServer + + // Init the master + for _, srv := range shard.Servers { + if srv.ID() == masterHostPort { + master = srv + changed, err := master.InitMaster(ctx) + if err != nil { + return listChanged, append(merr, err) + } + if changed { + listChanged = append(listChanged, master.ID()) + } + } + } + + // Init the slaves + for _, srv := range shard.Servers { + if srv.ID() != masterHostPort { + changed, err := srv.InitSlave(ctx, master) + if err != nil { + merr = append(merr, err) + } + if changed { + listChanged = append(listChanged, srv.ID()) + } + } + } + + return listChanged, merr.ErrorOrNil() +} diff --git a/pkg/redis/sharded/redis_shard_test.go b/pkg/redis/sharded/redis_shard_test.go new file mode 100644 index 00000000..874ff9eb --- /dev/null +++ b/pkg/redis/sharded/redis_shard_test.go @@ -0,0 +1,1047 @@ +package sharded + +import ( + "context" + "errors" + "fmt" + "reflect" + "testing" + + "github.com/3scale/saas-operator/pkg/redis/client" + redis "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/3scale/saas-operator/pkg/util" + "github.com/go-test/deep" +) + +func DiscoveredServersAreEqual(a, b *Shard) (bool, []string) { + if len(a.Servers) != len(b.Servers) { + return false, []string{fmt.Sprintf("different number of servers %d != 
%d", len(a.Servers), len(b.Servers))} + } + + for idx := range a.Servers { + if a.Servers[idx].Role != b.Servers[idx].Role { + return false, []string{fmt.Sprintf("%s != %s", a.Servers[idx].Role, b.Servers[idx].Role)} + } + if diff := deep.Equal(a.Servers[idx].Config, b.Servers[idx].Config); len(diff) > 0 { + return false, diff + } + } + return true, []string{} +} + +func TestNewShard(t *testing.T) { + type args struct { + name string + servers map[string]string + pool *redis.ServerPool + } + tests := []struct { + name string + args args + want *Shard + wantErr bool + }{ + { + name: "Returns a new Shard object when the pool is empty", + args: args{ + name: "test", + servers: map[string]string{ + "srv0": "redis://127.0.0.1:1000", + "srv1": "redis://127.0.0.1:2000", + "srv2": "redis://127.0.0.1:3000", + }, + pool: redis.NewServerPool(), + }, + want: &Shard{ + Name: "test", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("srv0")), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("srv1")), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("srv2")), + client.Unknown, + map[string]string{}, + ), + }, + pool: redis.NewServerPool( + redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("srv0")), + redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("srv1")), + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("srv2")), + ), + }, + wantErr: false, + }, + { + name: "Returns an error (bad connection string)", + args: args{ + name: "test", + servers: map[string]string{ + "srv0": "redis://127.0.0.1:1000", + "srv1": "127.0.0.1:2000", + "srv2": "redis://127.0.0.1:3000", + }, + pool: redis.NewServerPool(), + }, + want: &Shard{ + Name: "test", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("srv0")), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("srv2")), + client.Unknown, + map[string]string{}, + ), + }, + pool: redis.NewServerPool( + redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("srv0")), + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("srv2")), + ), + }, + wantErr: true, + }, + { + name: "Gets servers from the server pool", + args: args{ + name: "test", + servers: map[string]string{ + "redis://127.0.0.1:1000": "redis://127.0.0.1:1000", + "redis://127.0.0.1:2000": "redis://127.0.0.1:2000", + "redis://127.0.0.1:3000": "redis://127.0.0.1:3000", + }, + pool: redis.NewServerPool( + redis.MustNewServer("redis://127.0.0.1:1000", nil), + redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("srv1")), + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("srv2")), + ), + }, + want: &Shard{ + Name: "test", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:1000", nil), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("srv1")), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("srv2")), + client.Unknown, + map[string]string{}, + ), + }, + pool: redis.NewServerPool( + redis.MustNewServer("redis://127.0.0.1:1000", nil), + 
redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("srv1")), + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("srv2")), + ), + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewShard(tt.args.name, tt.args.servers, tt.args.pool) + if (error(err) != nil) != tt.wantErr { + t.Errorf("NewShard() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := deep.Equal(got, tt.want); len(diff) > 0 { + t.Errorf("NewShard() got diff: %v", diff) + } + }) + } +} + +func TestShard_Discover(t *testing.T) { + type fields struct { + Name string + Servers []*RedisServer + pool *redis.ServerPool + } + type args struct { + ctx context.Context + sentinel *SentinelServer + options DiscoveryOptionSet + } + tests := []struct { + name string + fields fields + args args + want *Shard + wantErr bool + }{ + { + name: "No sentinel: discovers just the roles for all servers in the shard", + fields: fields{ + Name: "test", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + client.Unknown, + map[string]string{}, + ), + }, + pool: redis.NewServerPool(), + }, + args: args{ + ctx: context.TODO(), + sentinel: nil, + options: DiscoveryOptionSet{}, + }, + want: &Shard{Name: "test", + Servers: []*RedisServer{ + {Role: client.Master, Config: map[string]string{}}, + {Role: client.Slave, Config: map[string]string{}}, + {Role: client.Slave, Config: map[string]string{}}, + }}, + wantErr: false, + }, + { + name: "No sentinel: second server fails, returns error", + fields: fields{ + Name: "test", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + client.NewPredefinedRedisFakeResponse("role-slave", errors.New("error")), + ), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + client.Unknown, + map[string]string{}, + ), + }, + pool: redis.NewServerPool(), + }, + args: args{ + ctx: context.TODO(), + sentinel: nil, + options: DiscoveryOptionSet{}, + }, + want: &Shard{Name: "test", + Servers: []*RedisServer{ + {Role: client.Master, Config: map[string]string{}}, + {Role: client.Unknown, Config: map[string]string{}}, + {Role: client.Slave, Config: map[string]string{}}, + }}, + wantErr: true, + }, + { + name: "Sentinel: discovers roles and config options within a shard (all available options)", + fields: fields{ + Name: "test", + Servers: []*RedisServer{}, + pool: redis.NewServerPool( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + client.NewPredefinedRedisFakeResponse("no-save", nil), + ), + redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + 
client.NewPredefinedRedisFakeResponse("role-slave", nil), + client.NewPredefinedRedisFakeResponse("save", nil), + client.NewPredefinedRedisFakeResponse("slave-read-only-yes", nil), + ), + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + client.NewPredefinedRedisFakeResponse("no-save", nil), + client.NewPredefinedRedisFakeResponse("slave-read-only-no", nil), + ), + ), + }, + args: args{ + ctx: context.TODO(), + sentinel: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + // cmd: SentinelMaster + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "test", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelSlaves + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:2000", + "ip", "127.0.0.1", + "port", "2000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:3000", + "ip", "127.0.0.1", + "port", "3000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + )), + options: []DiscoveryOption{SaveConfigDiscoveryOpt, SlaveReadOnlyDiscoveryOpt}, + }, + want: &Shard{Name: "test", + Servers: []*RedisServer{ + {Role: client.Master, Config: map[string]string{"save": ""}}, + {Role: client.Slave, Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}}, + {Role: client.Slave, Config: map[string]string{"save": "", "slave-read-only": "no"}}, + }}, + wantErr: false, + }, + { + name: "Sentinel: 'sentinel master' command fails ", + fields: fields{ + Name: "test", + Servers: []*RedisServer{}, + pool: redis.NewServerPool( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000"), + redis.NewFakeServerWithFakeClient("127.0.0.1", "2000"), + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000"), + ), + }, + args: args{ + ctx: context.TODO(), + sentinel: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + // cmd: SentinelMaster + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "test", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return errors.New("error") }, + }, + )), + options: []DiscoveryOption{SaveConfigDiscoveryOpt, SlaveReadOnlyDiscoveryOpt}, + }, + want: &Shard{Name: "test", Servers: []*RedisServer{}}, + wantErr: true, + }, + { + name: "Sentinel: master is down", + fields: fields{ + Name: "test", + Servers: []*RedisServer{}, + pool: redis.NewServerPool( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000"), + redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + client.NewPredefinedRedisFakeResponse("save", nil), + client.NewPredefinedRedisFakeResponse("slave-read-only-yes", nil), + ), + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + client.NewPredefinedRedisFakeResponse("no-save", nil), + client.NewPredefinedRedisFakeResponse("slave-read-only-no", nil), + ), + ), + }, + args: args{ + ctx: context.TODO(), + sentinel: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + // cmd: SentinelMaster + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "test", IP: "127.0.0.1", Port: 1000, Flags: "master,o_down"} + }, + InjectError: 
func() error { return nil }, + }, + )), + options: []DiscoveryOption{SaveConfigDiscoveryOpt, SlaveReadOnlyDiscoveryOpt}, + }, + want: &Shard{Name: "test", + Servers: []*RedisServer{ + {Role: client.Unknown, Config: map[string]string{}}, + }}, + wantErr: true, + }, + { + name: "Sentinel: master's Discover() fails", + fields: fields{ + Name: "test", + Servers: []*RedisServer{}, + pool: redis.NewServerPool( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", errors.New("error")), + ), + ), + }, + args: args{ + ctx: context.TODO(), + sentinel: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + // cmd: SentinelMaster + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "test", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + )), + options: []DiscoveryOption{}, + }, + want: &Shard{Name: "test", Servers: []*RedisServer{ + {Role: client.Unknown, Config: map[string]string{}}, + }}, + wantErr: true, + }, + { + name: "Sentinel: master role reported by sentinel differs from role reported by redis", + fields: fields{ + Name: "test", + Servers: []*RedisServer{}, + pool: redis.NewServerPool( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + ), + }, + args: args{ + ctx: context.TODO(), + sentinel: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + // cmd: SentinelMaster + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "test", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + )), + options: []DiscoveryOption{}, + }, + want: &Shard{Name: "test", Servers: []*RedisServer{ + {Role: client.Unknown, Config: map[string]string{}}, + }}, + wantErr: true, + }, + { + name: "Sentinel: 'sentinel slaves' command fails", + fields: fields{ + Name: "test", + Servers: []*RedisServer{}, + pool: redis.NewServerPool( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + ), + }, + args: args{ + ctx: context.TODO(), + sentinel: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + // cmd: SentinelMaster + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "test", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelSlaves + InjectResponse: func() interface{} { + return []interface{}{} + }, + InjectError: func() error { return errors.New("error") }, + }, + )), + options: []DiscoveryOption{}, + }, + want: &Shard{Name: "test", + Servers: []*RedisServer{ + {Role: client.Master, Config: map[string]string{}}, + }}, + wantErr: true, + }, + { + name: "Sentinel: a slave is down", + fields: fields{ + Name: "test", + Servers: []*RedisServer{}, + pool: redis.NewServerPool( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + client.NewPredefinedRedisFakeResponse("no-save", nil), + ), + 
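+ // the :2000 server gets no canned replies: sentinel flags it s_down below, so it must not be queried at all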
redis.NewFakeServerWithFakeClient("127.0.0.1", "2000"), + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + client.NewPredefinedRedisFakeResponse("no-save", nil), + client.NewPredefinedRedisFakeResponse("slave-read-only-no", nil), + ), + ), + }, + args: args{ + ctx: context.TODO(), + sentinel: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + // cmd: SentinelMaster + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "test", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelSlaves + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:2000", + "ip", "127.0.0.1", + "port", "2000", + "flags", "slave,s_down", + }, + []interface{}{ + "name", "127.0.0.1:3000", + "ip", "127.0.0.1", + "port", "3000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + )), + options: []DiscoveryOption{SaveConfigDiscoveryOpt, SlaveReadOnlyDiscoveryOpt}, + }, + want: &Shard{Name: "test", + Servers: []*RedisServer{ + {Role: client.Master, Config: map[string]string{"save": ""}}, + {Role: client.Unknown, Config: map[string]string{}}, + {Role: client.Slave, Config: map[string]string{"save": "", "slave-read-only": "no"}}, + }}, + wantErr: true, + }, + { + name: "Sentinel: slave role reported by sentinel differs from role reported by redis", + fields: fields{ + Name: "test", + Servers: []*RedisServer{}, + pool: redis.NewServerPool( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + ), + }, + args: args{ + ctx: context.TODO(), + sentinel: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + // cmd: SentinelMaster + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "test", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelSlaves + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:2000", + "ip", "127.0.0.1", + "port", "2000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:3000", + "ip", "127.0.0.1", + "port", "3000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + )), + options: []DiscoveryOption{}, + }, + want: &Shard{Name: "test", + Servers: []*RedisServer{ + {Role: client.Master, Config: map[string]string{}}, + {Role: client.Slave, Config: map[string]string{}}, + {Role: client.Unknown, Config: map[string]string{}}, + }}, + wantErr: true, + }, + { + name: "Sentinel: Discover() fails for a slave", + fields: fields{ + Name: "test", + Servers: []*RedisServer{}, + pool: redis.NewServerPool( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + client.NewPredefinedRedisFakeResponse("no-save", nil), + ), + redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + 
client.NewPredefinedRedisFakeResponse("no-save", errors.New("error")), + ), + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + client.NewPredefinedRedisFakeResponse("no-save", nil), + ), + ), + }, + args: args{ + ctx: context.TODO(), + sentinel: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + // cmd: SentinelMaster + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "test", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelSlaves + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:2000", + "ip", "127.0.0.1", + "port", "2000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:3000", + "ip", "127.0.0.1", + "port", "3000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + )), + options: []DiscoveryOption{SaveConfigDiscoveryOpt}, + }, + want: &Shard{Name: "test", + Servers: []*RedisServer{ + {Role: client.Master, Config: map[string]string{"save": ""}}, + {Role: client.Unknown, Config: map[string]string{}}, + {Role: client.Slave, Config: map[string]string{"save": ""}}, + }}, + wantErr: true, + }, + { + name: "Sentinel: Discover() only masters", + fields: fields{ + Name: "test", + Servers: []*RedisServer{}, + pool: redis.NewServerPool( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + redis.NewFakeServerWithFakeClient("127.0.0.1", "2000"), + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000"), + ), + }, + args: args{ + ctx: context.TODO(), + sentinel: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + // cmd: SentinelMaster + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "test", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + )), + options: []DiscoveryOption{OnlyMasterDiscoveryOpt}, + }, + want: &Shard{Name: "test", + Servers: []*RedisServer{ + {Role: client.Master, Config: map[string]string{}}, + }}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &Shard{ + Name: tt.fields.Name, + Servers: tt.fields.Servers, + pool: tt.fields.pool, + } + if err := s.Discover(tt.args.ctx, tt.args.sentinel, tt.args.options...); (err != nil) != tt.wantErr { + t.Errorf("Shard.Discover() error = %v, wantErr %v", err, tt.wantErr) + } + if equal, diff := DiscoveredServersAreEqual(s, tt.want); !equal { + t.Errorf("Shard.Discover() got diff = %v", diff) + } + }) + } +} + +func TestShard_Init(t *testing.T) { + type fields struct { + Name string + Servers []*RedisServer + } + type args struct { + ctx context.Context + masterHostPort string + } + tests := []struct { + name string + fields fields + args args + want []string + wantErr bool + }{ + // { + // name: "All redis servers configured", + // fields: fields{ + // Name: "test", + // Servers: []*RedisServer{ + // NewRedisServerFromParams( + // redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + // client.FakeResponse{ + // InjectResponse: func() interface{} { + // return []interface{}{"slave", "127.0.0.1"} + // }, + // InjectError: func() error { return nil }, + // }, + // client.FakeResponse{ + // InjectResponse: func() interface{} { return nil }, + // 
InjectError: func() error { return nil }, + // }, + // client.NewPredefinedRedisFakeResponse("role-master", nil), + // client.NewPredefinedRedisFakeResponse("role-master", nil), + // ), + // client.Unknown, + // map[string]string{}, + // ), + // NewRedisServerFromParams( + // redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + // client.FakeResponse{ + // InjectResponse: func() interface{} { + // return []interface{}{"slave", "127.0.0.1"} + // }, + // InjectError: func() error { return nil }, + // }, + // client.FakeResponse{ + // InjectResponse: func() interface{} { return nil }, + // InjectError: func() error { return nil }, + // }, + // ), + // client.Unknown, + // map[string]string{}, + // ), + // NewRedisServerFromParams( + // redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + // client.FakeResponse{ + // InjectResponse: func() interface{} { + // return []interface{}{"slave", "127.0.0.1"} + // }, + // InjectError: func() error { return nil }, + // }, + // client.FakeResponse{ + // InjectResponse: func() interface{} { return nil }, + // InjectError: func() error { return nil }, + // }, + // ), + // client.Unknown, + // map[string]string{}, + // ), + // }, + // }, + // args: args{ctx: context.TODO(), masterHostPort: "127.0.0.1:1000"}, + // want: []string{"127.0.0.1:1000", "127.0.0.1:2000", "127.0.0.1:3000"}, + // wantErr: false, + // }, + { + name: "No configuration needed", + fields: fields{ + Name: "test", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + client.FakeResponse{ + InjectResponse: func() interface{} { + return []interface{}{"slave", "10.0.0.1"} + }, + InjectError: func() error { return nil }, + }, + ), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.FakeResponse{ + InjectResponse: func() interface{} { + return []interface{}{"slave", "10.0.0.1"} + }, + InjectError: func() error { return nil }, + }, + ), + client.Unknown, + map[string]string{}, + ), + }, + }, + args: args{ctx: context.TODO(), masterHostPort: "127.0.0.1:1000"}, + want: []string{}, + wantErr: false, + }, + { + name: "Returns error", + fields: fields{ + Name: "test", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.FakeResponse{ + InjectResponse: func() interface{} { return []interface{}{} }, + InjectError: func() error { return errors.New("error") }, + }, + ), + client.Unknown, + map[string]string{}, + ), + }, + }, + args: args{ctx: context.TODO(), masterHostPort: "127.0.0.1:1000"}, + want: []string{}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &Shard{ + Name: tt.fields.Name, + Servers: tt.fields.Servers, + } + got, err := s.Init(tt.args.ctx, tt.args.masterHostPort) + if (err != nil) != tt.wantErr { + t.Errorf("Shard.Init() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Shard.Init() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestShard_GetMasterAddr(t *testing.T) { + type fields struct { + Name string + Servers []*RedisServer + pool *redis.ServerPool + } + tests := []struct { + name string + fields fields + want string + wantErr bool + }{ + { + name: 
"Returns the master's address", + fields: fields{ + Name: "test", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:1000", nil), + client.Master, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.2:2000", util.Pointer("srv1")), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.3:3000", util.Pointer("srv2")), + client.Unknown, + map[string]string{}, + ), + }, + pool: redis.NewServerPool(), + }, + want: "127.0.0.1:1000", + wantErr: false, + }, + { + name: "Error, no master", + fields: fields{ + Name: "test", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:1000", nil), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.2:2000", util.Pointer("srv1")), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.3:3000", util.Pointer("srv2")), + client.Unknown, + map[string]string{}, + ), + }, + pool: redis.NewServerPool(), + }, + want: "", + wantErr: true, + }, + { + name: "Error, more than one master", + fields: fields{ + Name: "test", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:1000", nil), + client.Master, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.2:2000", util.Pointer("srv1")), + client.Slave, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.3:3000", util.Pointer("srv2")), + client.Master, + map[string]string{}, + ), + }, + pool: redis.NewServerPool(), + }, + want: "", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + shard := &Shard{ + Name: tt.fields.Name, + Servers: tt.fields.Servers, + pool: tt.fields.pool, + } + got, err := shard.GetMaster() + if (err != nil) != tt.wantErr { + t.Errorf("Shard.GetMasterAddr() error = %v, wantErr %v", err, tt.wantErr) + } + if err == nil && got.ID() != tt.want { + t.Errorf("Shard.GetMasterAddr() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestShard_GetServerByID(t *testing.T) { + type args struct { + hostport string + } + tests := []struct { + name string + servers map[string]string + args args + wantIndex int + wantErr bool + }{ + { + name: "Resturns a server", + servers: map[string]string{ + "host1": "redis://127.0.0.1:1000", + "host2": "redis://127.0.0.1:2000", + }, + args: args{ + hostport: "127.0.0.1:1000", + }, + wantIndex: 0, + wantErr: false, + }, + { + name: "Adds a server", + servers: map[string]string{ + "host1": "redis://127.0.0.1:1000", + "host2": "redis://127.0.0.1:2000", + }, + args: args{ + hostport: "127.0.0.1:3000", + }, + wantIndex: 2, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + shard, _ := NewShard("test", tt.servers, redis.NewServerPool()) + got, err := shard.GetServerByID(tt.args.hostport) + if (err != nil) != tt.wantErr { + t.Errorf("Shard.GetServerByID() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != shard.Servers[tt.wantIndex] { + t.Errorf("Shard.GetServerByID() = %v, want %v", got, shard.Servers[tt.wantIndex]) + } + }) + } +} diff --git a/pkg/redis/sharded/redis_sharded_cluster.go b/pkg/redis/sharded/redis_sharded_cluster.go new file mode 100644 index 00000000..75698c17 --- /dev/null +++ b/pkg/redis/sharded/redis_sharded_cluster.go @@ -0,0 
+1,170 @@ +package sharded + +import ( + "context" + "fmt" + "sort" + "time" + + redis "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/3scale/saas-operator/pkg/util" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// Cluster represents a sharded redis cluster, composed by several Shards +type Cluster struct { + Shards []*Shard + Sentinels []*SentinelServer + pool *redis.ServerPool +} + +// NewShardedCluster returns a new Cluster given the shard structure passed as a map[string]map[string]string; +// the special "sentinel" key holds the sentinel servers +func NewShardedCluster(ctx context.Context, serverList map[string]map[string]string, pool *redis.ServerPool) (*Cluster, error) { + logger := log.FromContext(ctx, "function", "NewShardedCluster") + cluster := Cluster{pool: pool} + cluster.Shards = make([]*Shard, 0, len(serverList)) + + for shardName, shardServers := range serverList { + + switch shardName { + + case "sentinel": + sentinels, err := NewHighAvailableSentinel(serverList["sentinel"], pool) + if err != nil { + return nil, err + } + cluster.Sentinels = sentinels + + default: + shard, err := NewShard(shardName, shardServers, pool) + if err != nil { + logger.Error(err, "unable to create sharded cluster") + return nil, err + } + cluster.Shards = append(cluster.Shards, shard) + + } + } + + // sort the slices to obtain consistent results + sort.Slice(cluster.Shards, func(i, j int) bool { + return cluster.Shards[i].Name < cluster.Shards[j].Name + }) + sort.Slice(cluster.Sentinels, func(i, j int) bool { + return cluster.Sentinels[i].ID() < cluster.Sentinels[j].ID() + }) + + return &cluster, nil +} + +func (cluster *Cluster) GetShardNames() []string { + shards := make([]string, len(cluster.Shards)) + for i, shard := range cluster.Shards { + shards[i] = shard.Name + } + sort.Strings(shards) + return shards +} + +func (cluster Cluster) GetShardByName(name string) *Shard { + for _, shard := range cluster.Shards { + if shard.Name == name { + return shard + } + } + return nil +} + +func (cluster *Cluster) Discover(ctx context.Context, options ...DiscoveryOption) error { + var merr util.MultiError + + for _, shard := range cluster.Shards { + if err := shard.Discover(ctx, nil, options...); err != nil { + merr = append(merr, err) + continue + } + } + return merr.ErrorOrNil() +} + +// SentinelDiscover updates the status of the cluster as seen from sentinel +func (cluster *Cluster) SentinelDiscover(ctx context.Context, opts ...DiscoveryOption) error { + merr := util.MultiError{} + + // Get a healthy sentinel server + sentinel := cluster.GetSentinel(ctx) + if sentinel == nil { + return append(merr, fmt.Errorf("unable to find a healthy sentinel server")) + } + + masters, err := sentinel.SentinelMasters(ctx) + if err != nil { + return append(merr, err) + } + + for _, master := range masters { + + // Get the corresponding shard + shard := cluster.GetShardByName(master.Name) + + // Add the shard if not already present + if shard == nil { + shard = &Shard{ + Name: master.Name, + Servers: []*RedisServer{}, + pool: cluster.pool, + } + cluster.Shards = append(cluster.Shards, shard) + } + + if err := shard.Discover(ctx, sentinel, opts...); err != nil { + merr = append(merr, ShardDiscoveryError{ShardName: master.Name, Errors: err.(util.MultiError)}) + // keep going with the other shards + continue + } + } + return merr.ErrorOrNil() +} + +// GetSentinel returns a healthy SentinelServer from the list of sentinels +// Returns nil if no healthy SentinelServer was found +func (cluster *Cluster) GetSentinel(pctx context.Context) *SentinelServer { + ctx, cancel := 
context.WithTimeout(pctx, 5*time.Second) + defer cancel() + + ch := make(chan int) + for idx := range cluster.Sentinels { + go func(i int) { + defer func() { + if r := recover(); r != nil { + return + } + }() + if err := cluster.Sentinels[i].SentinelPing(ctx); err == nil { + ch <- i + } + }(idx) + } + + select { + case <-ctx.Done(): + case idx := <-ch: + close(ch) + return cluster.Sentinels[idx] + } + + return nil +} + +type ShardDiscoveryError struct { + ShardName string + Errors util.MultiError +} + +func (e ShardDiscoveryError) Error() string { + return fmt.Sprintf("errors occurred for shard %s: '%s'", e.ShardName, e.Errors) +} + +func (e ShardDiscoveryError) Unwrap() []error { + return []error(e.Errors) +} diff --git a/pkg/redis/sharded/redis_sharded_cluster_test.go b/pkg/redis/sharded/redis_sharded_cluster_test.go new file mode 100644 index 00000000..aea2d64c --- /dev/null +++ b/pkg/redis/sharded/redis_sharded_cluster_test.go @@ -0,0 +1,826 @@ +package sharded + +import ( + "context" + "errors" + "reflect" + "testing" + + "github.com/3scale/saas-operator/pkg/redis/client" + redis "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/3scale/saas-operator/pkg/util" + "github.com/go-test/deep" +) + +func TestNewShardedCluster(t *testing.T) { + type args struct { + ctx context.Context + serverList map[string]map[string]string + pool *redis.ServerPool + } + tests := []struct { + name string + args args + want *Cluster + wantErr bool + }{ + { + name: "Returns a new ShardedCluster object", + args: args{ + ctx: context.TODO(), + serverList: map[string]map[string]string{ + "shard00": {"srv00-0": "redis://127.0.0.1:1000", "srv00-1": "redis://127.0.0.1:2000"}, + "shard01": {"srv01-0": "redis://127.0.0.1:3000", "srv01-1": "redis://127.0.0.1:4000"}, + "sentinel": {"sentinel-0": "redis://127.0.0.1:5000", "sentinel-1": "redis://127.0.0.1:6000"}, + }, + pool: redis.NewServerPool(), + }, + want: &Cluster{ + Shards: []*Shard{ + { + Name: "shard00", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("srv00-0")), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("srv00-1")), + client.Unknown, + map[string]string{}, + ), + }, + pool: redis.NewServerPool( + redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("srv00-0")), + redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("srv00-1")), + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("srv01-0")), + redis.MustNewServer("redis://127.0.0.1:4000", util.Pointer("srv01-1")), + redis.MustNewServer("redis://127.0.0.1:5000", util.Pointer("sentinel-0")), + redis.MustNewServer("redis://127.0.0.1:6000", util.Pointer("sentinel-1")), + ), + }, + { + Name: "shard01", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("srv01-0")), + client.Unknown, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:4000", util.Pointer("srv01-1")), + client.Unknown, + map[string]string{}, + ), + }, + pool: redis.NewServerPool( + redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("srv00-0")), + redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("srv00-1")), + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("srv01-0")), + redis.MustNewServer("redis://127.0.0.1:4000", util.Pointer("srv01-1")), + redis.MustNewServer("redis://127.0.0.1:5000", util.Pointer("sentinel-0")), + 
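+ // the server pool is shared cluster-wide and also holds the sentinel servers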
redis.MustNewServer("redis://127.0.0.1:6000", util.Pointer("sentinel-1")), + ), + }, + }, + Sentinels: []*SentinelServer{ + {Server: redis.MustNewServer("redis://127.0.0.1:5000", util.Pointer("sentinel-0"))}, + {Server: redis.MustNewServer("redis://127.0.0.1:6000", util.Pointer("sentinel-1"))}, + }, + pool: redis.NewServerPool( + redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("srv00-0")), + redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("srv00-1")), + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("srv01-0")), + redis.MustNewServer("redis://127.0.0.1:4000", util.Pointer("srv01-1")), + redis.MustNewServer("redis://127.0.0.1:5000", util.Pointer("sentinel-0")), + redis.MustNewServer("redis://127.0.0.1:6000", util.Pointer("sentinel-1")), + ), + }, + wantErr: false, + }, + { + name: "Returns error", + args: args{ + ctx: context.TODO(), + serverList: map[string]map[string]string{ + "shard00": {"srv00-0": "redis://127.0.0.1:1000", "srv00-1": "redis://127.0.0.1:2000"}, + "shard01": {"srv01-0": "127.0.0.1:3000", "srv01-1": "redis://127.0.0.1:4000"}, + }, + pool: redis.NewServerPool(), + }, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewShardedCluster(tt.args.ctx, tt.args.serverList, tt.args.pool) + if (err != nil) != tt.wantErr { + t.Errorf("NewShardedCluster() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := deep.Equal(got, tt.want); len(diff) > 0 { + t.Errorf("NewShardedCluster() got diff: %v", diff) + } + }) + } +} + +func TestShardedCluster_GetShardNames(t *testing.T) { + tests := []struct { + name string + sc Cluster + want []string + }{ + { + name: "Returns the shrard names as a slice of strings", + sc: Cluster{ + Shards: []*Shard{ + { + Name: "shard00", + Servers: []*RedisServer{}, + }, + { + Name: "shard01", + Servers: []*RedisServer{}, + }, + { + Name: "shard02", + Servers: []*RedisServer{}, + }, + }, + }, + want: []string{"shard00", "shard01", "shard02"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.sc.GetShardNames(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ShardedCluster.GetShardNames() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestShardedCluster_GetShardByName(t *testing.T) { + type args struct { + name string + } + tests := []struct { + name string + sc Cluster + args args + want *Shard + }{ + { + name: "Returns the shard of the given name", + sc: Cluster{ + Shards: []*Shard{ + { + Name: "shard00", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("srv00-0")), + client.Unknown, + map[string]string{}, + ), + }, + }, + { + Name: "shard01", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("srv01-0")), + client.Unknown, + map[string]string{}, + ), + }, + }, + }, + }, + args: args{ + name: "shard01", + }, + want: &Shard{ + Name: "shard01", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("srv01-0")), + client.Unknown, + map[string]string{}, + ), + }, + }, + }, + { + name: "Returns nil if not found", + sc: Cluster{}, + args: args{ + name: "shard01", + }, + want: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if diff := deep.Equal(tt.sc.GetShardByName(tt.args.name), tt.want); len(diff) > 0 { + t.Errorf("ShardedCluster.GetShardByName() got diff: %v", diff) + } + }) + } 
+} + +func TestCluster_Discover(t *testing.T) { + type fields struct { + Shards []*Shard + Sentinels []*SentinelServer + pool *redis.ServerPool + } + type args struct { + ctx context.Context + options []DiscoveryOption + } + tests := []struct { + name string + fields fields + pool *redis.ServerPool + args args + wantErr bool + }{ + { + name: "Discovers roles for all cluster servers", + fields: fields{ + Shards: []*Shard{ + { + Name: "shard0", + Servers: []*RedisServer{ + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + }, + pool: redis.NewServerPool(), + }, + { + Name: "shard1", + Servers: []*RedisServer{ + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "4000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "5000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "6000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + }, + pool: redis.NewServerPool(), + }, + }, + Sentinels: []*SentinelServer{}, + pool: &redis.ServerPool{}, + }, + args: args{ + ctx: context.TODO(), + options: []DiscoveryOption{}, + }, + wantErr: false, + }, + { + name: "Returns error", + fields: fields{ + Shards: []*Shard{ + { + Name: "shard0", + Servers: []*RedisServer{ + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", errors.New("error")), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + }, + pool: redis.NewServerPool(), + }, + }, + }, + pool: &redis.ServerPool{}, + args: args{ + ctx: context.TODO(), + options: []DiscoveryOption{}, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cluster := Cluster{ + Shards: tt.fields.Shards, + Sentinels: tt.fields.Sentinels, + pool: tt.fields.pool, + } + if err := cluster.Discover(tt.args.ctx, tt.args.options...); (err != nil) != tt.wantErr { + t.Errorf("Cluster.Discover() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestCluster_SentinelDiscover(t *testing.T) { + type fields struct { + Shards []*Shard + Sentinels []*SentinelServer + pool *redis.ServerPool + } + type args struct { + ctx context.Context + opts []DiscoveryOption + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "Discovers roles for all cluster servers using sentinel", + fields: fields{ + Shards: []*Shard{ + { + Name: "shard0", + Servers: []*RedisServer{ + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + 
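+ // cmd: role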
client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + }, + pool: redis.NewServerPool(), + }, + { + Name: "shard1", + Servers: []*RedisServer{ + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "4000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "5000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "6000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + }, + pool: redis.NewServerPool(), + }, + }, + Sentinels: []*SentinelServer{ + NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("sentinel-0", "1000", + client.FakeResponse{ + // cmd: Ping + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelMasters() + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{"name", "shard0", "ip", "127.0.0.1", "port", "1000"}, + []interface{}{"name", "shard1", "ip", "127.0.0.1", "port", "5000"}, + } + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelMaster (shard0) + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelSlaves (shard0) + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:2000", + "ip", "127.0.0.1", + "port", "2000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:3000", + "ip", "127.0.0.1", + "port", "3000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelMaster (shard1) + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "shard1", IP: "127.0.0.1", Port: 5000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelSlaves (shard1) + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:4000", + "ip", "127.0.0.1", + "port", "4000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:6000", + "ip", "127.0.0.1", + "port", "6000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + )), + NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("sentinel-1", "2000", + client.FakeResponse{ + // cmd: Ping + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return errors.New("ping failed") }, + }, + )), + }, + pool: &redis.ServerPool{}, + }, + args: args{ + ctx: context.TODO(), + opts: []DiscoveryOption{}, + }, + wantErr: false, + }, + { + name: "Discovers shards when not provided", + fields: fields{ + Shards: nil, + Sentinels: []*SentinelServer{ + NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("sentinel-0", "1000", + client.FakeResponse{ + // cmd: Ping + InjectResponse: 
func() interface{} { return nil }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelMasters() + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{"name", "shard0", "ip", "127.0.0.1", "port", "1000"}, + []interface{}{"name", "shard1", "ip", "127.0.0.1", "port", "5000"}, + } + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelMaster (shard0) + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelSlaves (shard0) + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:2000", + "ip", "127.0.0.1", + "port", "2000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:3000", + "ip", "127.0.0.1", + "port", "3000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelMaster (shard1) + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "shard1", IP: "127.0.0.1", Port: 5000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelSlaves (shard1) + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:4000", + "ip", "127.0.0.1", + "port", "4000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:6000", + "ip", "127.0.0.1", + "port", "6000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + )), + NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("sentinel-1", "2000", + client.FakeResponse{ + // cmd: Ping + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return errors.New("ping failed") }, + }, + )), + }, + pool: redis.NewServerPool( + redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", client.NewPredefinedRedisFakeResponse("role-master", nil)), + redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", client.NewPredefinedRedisFakeResponse("role-slave", nil)), + redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", client.NewPredefinedRedisFakeResponse("role-slave", nil)), + redis.NewFakeServerWithFakeClient("127.0.0.1", "4000", client.NewPredefinedRedisFakeResponse("role-slave", nil)), + redis.NewFakeServerWithFakeClient("127.0.0.1", "5000", client.NewPredefinedRedisFakeResponse("role-master", nil)), + redis.NewFakeServerWithFakeClient("127.0.0.1", "6000", client.NewPredefinedRedisFakeResponse("role-slave", nil)), + ), + }, + args: args{ + ctx: context.TODO(), + opts: []DiscoveryOption{}, + }, + wantErr: false, + }, + { + name: "Error discovering server", + fields: fields{ + Shards: []*Shard{ + { + Name: "shard0", + Servers: []*RedisServer{ + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + client.NewPredefinedRedisFakeResponse("role-master", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + client.NewPredefinedRedisFakeResponse("role-slave", nil), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + { + Server: redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + client.NewPredefinedRedisFakeResponse("role-slave", errors.New("error")), + ), + Role: client.Unknown, + Config: map[string]string{}, + }, + }, + pool: redis.NewServerPool(), + 
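+ // the :3000 server's "role" reply is rigged to fail, so discovery of shard0 must surface an error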
}, + }, + Sentinels: []*SentinelServer{ + NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("sentinel-0", "1000", + client.FakeResponse{ + // cmd: Ping + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelMasters() + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{"name", "shard0", "ip", "127.0.0.1", "port", "1000"}, + } + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelMaster (shard0) + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{Name: "shard0", IP: "127.0.0.1", Port: 1000, Flags: "master"} + }, + InjectError: func() error { return nil }, + }, + client.FakeResponse{ + // cmd: SentinelSlaves (shard0) + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{ + "name", "127.0.0.1:2000", + "ip", "127.0.0.1", + "port", "2000", + "flags", "slave", + }, + []interface{}{ + "name", "127.0.0.1:3000", + "ip", "127.0.0.1", + "port", "3000", + "flags", "slave", + }, + } + }, + InjectError: func() error { return nil }, + }, + )), + NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("sentinel-1", "2000", + client.FakeResponse{ + // cmd: Ping + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return errors.New("ping failed") }, + }, + )), + }, + pool: &redis.ServerPool{}, + }, + args: args{ + ctx: context.TODO(), + opts: []DiscoveryOption{}, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cluster := Cluster{ + Shards: tt.fields.Shards, + Sentinels: tt.fields.Sentinels, + pool: tt.fields.pool, + } + if err := cluster.SentinelDiscover(tt.args.ctx, tt.args.opts...); (err != nil) != tt.wantErr { + t.Errorf("Cluster.SentinelDiscover() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestCluster_GetSentinel(t *testing.T) { + type fields struct { + Shards []*Shard + Sentinels []*SentinelServer + pool *redis.ServerPool + } + type args struct { + pctx context.Context + } + tests := []struct { + name string + fields fields + args args + want *SentinelServer + }{ + { + name: "Returns the first sentinel", + fields: fields{ + Shards: []*Shard{}, + Sentinels: []*SentinelServer{ + NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + // cmd: ping + client.FakeResponse{ + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return nil }, + }, + )), + NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("127.0.0.1", "2000", + // cmd: ping + client.FakeResponse{ + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return errors.New("error") }, + }, + )), + NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("127.0.0.1", "3000", + // cmd: ping + client.FakeResponse{ + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { return errors.New("error") }, + }, + )), + }, + pool: &redis.ServerPool{}, + }, + args: args{ + pctx: context.TODO(), + }, + want: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("127.0.0.1", "1000")), + }, + { + name: "Returns the third sentinel", + fields: fields{ + Shards: []*Shard{}, + Sentinels: []*SentinelServer{ + NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("127.0.0.1", "1000", + // cmd: ping + client.FakeResponse{ + InjectResponse: func() interface{} { return nil }, + InjectError: func() error { 
return errors.New("error") },
+ },
+ )),
+ NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("127.0.0.1", "2000",
+ // cmd: ping
+ client.FakeResponse{
+ InjectResponse: func() interface{} { return nil },
+ InjectError: func() error { return errors.New("error") },
+ },
+ )),
+ NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("127.0.0.1", "3000",
+ // cmd: ping
+ client.FakeResponse{
+ InjectResponse: func() interface{} { return nil },
+ InjectError: func() error { return nil },
+ },
+ )),
+ },
+ pool: &redis.ServerPool{},
+ },
+ args: args{
+ pctx: context.TODO(),
+ },
+ want: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("127.0.0.1", "3000")),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cluster := Cluster{
+ Shards: tt.fields.Shards,
+ Sentinels: tt.fields.Sentinels,
+ pool: tt.fields.pool,
+ }
+ got := cluster.GetSentinel(tt.args.pctx)
+ if diff := deep.Equal(got, tt.want); len(diff) > 0 {
+ t.Errorf("Cluster.GetSentinel() = got diff %v", diff)
+ }
+ })
+ }
+}
diff --git a/pkg/redis/sharded/sentinel_server.go b/pkg/redis/sharded/sentinel_server.go
new file mode 100644
index 00000000..e93534bb
--- /dev/null
+++ b/pkg/redis/sharded/sentinel_server.go
@@ -0,0 +1,128 @@
+package sharded
+
+import (
+ "context"
+ "sort"
+
+ saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1"
+ redis "github.com/3scale/saas-operator/pkg/redis/server"
+ "github.com/3scale/saas-operator/pkg/util"
+)
+
+const (
+ shardNotInitializedError = "ERR No such master with that name"
+)
+
+// SentinelServer represents a sentinel Pod
+type SentinelServer struct {
+ *redis.Server
+}
+
+func NewSentinelServerFromPool(connectionString string, alias *string, pool *redis.ServerPool) (*SentinelServer, error) {
+ srv, err := pool.GetServer(connectionString, alias)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SentinelServer{
+ Server: srv,
+ }, nil
+}
+
+func NewSentinelServerFromParams(srv *redis.Server) *SentinelServer {
+ return &SentinelServer{
+ Server: srv,
+ }
+}
+
+func NewHighAvailableSentinel(servers map[string]string, pool *redis.ServerPool) ([]*SentinelServer, error) {
+ var merr util.MultiError
+ sentinels := make([]*SentinelServer, 0, len(servers))
+
+ for key, connectionString := range servers {
+ var alias *string
+ if key != connectionString {
+ alias = util.Pointer(key) // take the address of a copy, not of the loop variable
+ }
+ srv, err := NewSentinelServerFromPool(connectionString, alias, pool)
+ if err != nil {
+ merr = append(merr, err)
+ continue
+ }
+ sentinels = append(sentinels, srv)
+ }
+
+ sort.Slice(sentinels, func(i, j int) bool {
+ return sentinels[i].ID() < sentinels[j].ID()
+ })
+
+ return sentinels, merr.ErrorOrNil()
+}
+
+// IsMonitoringShards checks whether all the shards in the passed list are being monitored by the SentinelServer
+func (sentinel *SentinelServer) IsMonitoringShards(ctx context.Context, shards []string) (bool, error) {
+
+ monitoredShards, err := sentinel.SentinelMasters(ctx)
+ if err != nil {
+ return false, err
+ }
+
+ if len(monitoredShards) == 0 {
+ return false, nil
+ }
+
+ for _, name := range shards {
+ found := false
+ for _, monitored := range monitoredShards {
+ if monitored.Name == name {
+ found = true
+ }
+ }
+ if !found {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// Monitor ensures that all the shards in the Cluster object are monitored by the SentinelServer
+func (sentinel *SentinelServer) Monitor(ctx context.Context, cluster *Cluster) ([]string, error) {
+ changed := []string{}
+
+ // Initialize unmonitored
shards + shardNames := cluster.GetShardNames() + for _, name := range shardNames { + + _, err := sentinel.SentinelMaster(ctx, name) + if err != nil { + if err.Error() == shardNotInitializedError { + + shard := cluster.GetShardByName(name) + master, err := shard.GetMaster() + if err != nil { + return changed, err + } + + err = sentinel.SentinelMonitor(ctx, name, master.GetHost(), master.GetPort(), saasv1alpha1.SentinelDefaultQuorum) + if err != nil { + return changed, util.WrapError("redis-sentinel/SentinelServer.Monitor", err) + } + // even if the next call fails, there has already been a write operation to sentinel + changed = append(changed, name) + + err = sentinel.SentinelSet(ctx, name, "down-after-milliseconds", "5000") + if err != nil { + return changed, util.WrapError("redis-sentinel/SentinelServer.Monitor", err) + } + // TODO: change the default failover timeout. + // TODO: maybe add a generic mechanism to set/modify parameters + + } else { + return changed, err + } + } + } + + return changed, nil +} diff --git a/pkg/redis/sharded/sentinel_server_test.go b/pkg/redis/sharded/sentinel_server_test.go new file mode 100644 index 00000000..0ebcec46 --- /dev/null +++ b/pkg/redis/sharded/sentinel_server_test.go @@ -0,0 +1,601 @@ +package sharded + +import ( + "context" + "errors" + "reflect" + "testing" + + "github.com/3scale/saas-operator/pkg/redis/client" + redis "github.com/3scale/saas-operator/pkg/redis/server" + "github.com/3scale/saas-operator/pkg/util" + "github.com/go-test/deep" +) + +var ( + testShardedCluster *Cluster = &Cluster{ + Shards: []*Shard{ + { + Name: "shard00", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("shard00-0")), + client.Master, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:2001", util.Pointer("shard00-1")), + client.Slave, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:2002", util.Pointer("shard00-2")), + client.Slave, + map[string]string{}, + ), + }, + }, + { + Name: "shard01", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:3000", util.Pointer("shard01-0")), + client.Master, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:3001", util.Pointer("shard01-1")), + client.Slave, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:3002", util.Pointer("shard01-2")), + client.Slave, + map[string]string{}, + ), + }, + }, + { + Name: "shard02", + Servers: []*RedisServer{ + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:4000", util.Pointer("shard02-0")), + client.Master, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:4001", util.Pointer("shard02-1")), + client.Slave, + map[string]string{}, + ), + NewRedisServerFromParams( + redis.MustNewServer("redis://127.0.0.1:4002", util.Pointer("shard02-2")), + client.Slave, + map[string]string{}, + ), + }, + }, + }, + } +) + +func init() { + deep.CompareUnexportedFields = true +} + +func TestNewSentinelServerFromPool(t *testing.T) { + type args struct { + connectionString string + alias *string + pool *redis.ServerPool + } + tests := []struct { + name string + args args + want *SentinelServer + wantErr bool + }{ + { + name: "Returns a SentinelServer", + args: args{ + connectionString: "redis://127.0.0.1:1000", + alias: util.Pointer("sentinel"), + pool: 
&redis.ServerPool{}, + }, + want: &SentinelServer{ + Server: redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("sentinel")), + }, + wantErr: false, + }, + { + name: "Gets server from pool", + args: args{ + connectionString: "redis://127.0.0.1:1000", + alias: nil, + pool: redis.NewServerPool(redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("sentinel"))), + }, + want: &SentinelServer{ + Server: redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("sentinel")), + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewSentinelServerFromPool(tt.args.connectionString, tt.args.alias, tt.args.pool) + if (err != nil) != tt.wantErr { + t.Errorf("NewSentinelServerFromPool() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := deep.Equal(got, tt.want); len(diff) > 0 { + t.Errorf("NewSentinelServerFromPool() = got diff %v", diff) + } + }) + } +} + +func TestNewHighAvailableSentinel(t *testing.T) { + type args struct { + servers map[string]string + pool *redis.ServerPool + } + tests := []struct { + name string + args args + want []*SentinelServer + wantErr bool + }{ + { + name: "Returns a list of sentinels", + args: args{ + servers: map[string]string{ + "sentinel-0": "redis://127.0.0.1:1000", + "sentinel-1": "redis://127.0.0.1:2000", + }, + pool: &redis.ServerPool{}, + }, + want: []*SentinelServer{ + {Server: redis.MustNewServer("redis://127.0.0.1:1000", util.Pointer("sentinel-0"))}, + {Server: redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("sentinel-1"))}, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewHighAvailableSentinel(tt.args.servers, tt.args.pool) + if (err != nil) != tt.wantErr { + t.Errorf("NewHighAvailableSentinel() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := deep.Equal(got, tt.want); len(diff) > 0 { + t.Errorf("NewHighAvailableSentinel() = got diff %v", diff) + } + }) + } +} + +func TestSentinelServer_IsMonitoringShards(t *testing.T) { + type args struct { + ctx context.Context + shards []string + } + tests := []struct { + name string + ss *SentinelServer + args args + want bool + wantErr bool + }{ + { + name: "All shards monitored by SentinelServer", + ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{"name", "shard01"}, + []interface{}{"name", "shard02"}, + } + }, + InjectError: func() error { return nil }, + })), + args: args{ + ctx: context.TODO(), + shards: []string{"shard01", "shard02"}, + }, + want: true, + wantErr: false, + }, + { + name: "No shard monitored", + ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + InjectResponse: func() interface{} { return []interface{}{} }, + InjectError: func() error { return nil }, + })), + args: args{ + ctx: context.TODO(), + shards: []string{"shard01", "shard02"}, + }, + want: false, + wantErr: false, + }, + { + name: "One shard is not monitored", + ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + InjectResponse: func() interface{} { + return []interface{}{ + []interface{}{"name", "shard01"}, + } + }, + InjectError: func() error { return nil }, + })), + args: args{ + ctx: context.TODO(), + shards: []string{"shard01", "shard02"}, + }, + want: false, + wantErr: false, + }, + { + name: "Returns an error", 
+ ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + client.FakeResponse{ + InjectResponse: func() interface{} { return []interface{}{} }, + InjectError: func() error { return errors.New("error") }, + })), + args: args{ + ctx: context.TODO(), + shards: []string{"shard01", "shard02"}, + }, + want: false, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.ss.IsMonitoringShards(tt.args.ctx, tt.args.shards) + if (err != nil) != tt.wantErr { + t.Errorf("SentinelServer.IsMonitoringShards() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("SentinelServer.IsMonitoringShards() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestSentinelServer_Monitor(t *testing.T) { + type args struct { + ctx context.Context + shards *Cluster + } + tests := []struct { + name string + ss *SentinelServer + args args + want []string + wantErr bool + }{ + { + name: "All shards monitored", + ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + // SentinelMaster response for shard00 + client.FakeResponse{ + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{ + Name: "shard00", + IP: "127.0.0.1", + Port: 2000, + } + }, + InjectError: func() error { return nil }, + }, + // SentinelMaster response for shard01 + client.FakeResponse{ + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{ + Name: "shard01", + IP: "127.0.0.1", + Port: 3000, + } + }, + InjectError: func() error { return nil }, + }, + // SentinelMaster response for shard02 + client.FakeResponse{ + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{ + Name: "shard02", + IP: "127.0.0.1", + Port: 4000, + } + }, + InjectError: func() error { return nil }, + }, + )), + args: args{ + ctx: context.TODO(), + shards: testShardedCluster, + }, + want: []string{}, + wantErr: false, + }, + { + name: "shard01 is not monitored", + ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + // SentinelMaster response for shard00 + client.FakeResponse{ + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{ + Name: "shard00", + IP: "127.0.0.1", + Port: 2000, + } + }, + InjectError: func() error { return nil }, + }, + // SentinelMaster response for shard01 (returns error as it is unmonitored) + client.FakeResponse{ + InjectResponse: func() interface{} { return &client.SentinelMasterCmdResult{} }, + InjectError: func() error { return errors.New(shardNotInitializedError) }, + }, + // SentinelMonitor response for shard01 + client.FakeResponse{ + InjectResponse: nil, + InjectError: func() error { return nil }, + }, + // SentinelSet response for shard01 + client.FakeResponse{ + InjectResponse: nil, + InjectError: func() error { return nil }, + }, + // SentinelMaster response for shard02 + client.FakeResponse{ + InjectResponse: func() interface{} { + return &client.SentinelMasterCmdResult{ + Name: "shard02", + IP: "127.0.0.1", + Port: 4000, + } + }, + InjectError: func() error { return nil }, + }, + )), + args: args{ + ctx: context.TODO(), + shards: testShardedCluster, + }, + want: []string{"shard01"}, + wantErr: false, + }, + { + name: "all shards are unmonitored", + ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port", + // SentinelMaster response for shard00 (returns error as it is unmonitored) + client.FakeResponse{ + InjectResponse: func() interface{} { return 
&client.SentinelMasterCmdResult{} },
+ InjectError: func() error { return errors.New(shardNotInitializedError) },
+ },
+ // SentinelMonitor response for shard00
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return nil },
+ },
+ // SentinelSet response for shard00
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return nil },
+ },
+ // SentinelMaster response for shard01 (returns error as it is unmonitored)
+ client.FakeResponse{
+ InjectResponse: func() interface{} { return &client.SentinelMasterCmdResult{} },
+ InjectError: func() error { return errors.New(shardNotInitializedError) },
+ },
+ // SentinelMonitor response for shard01
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return nil },
+ },
+ // SentinelSet response for shard01
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return nil },
+ },
+ // SentinelMaster response for shard02 (returns error as it is unmonitored)
+ client.FakeResponse{
+ InjectResponse: func() interface{} { return &client.SentinelMasterCmdResult{} },
+ InjectError: func() error { return errors.New(shardNotInitializedError) },
+ },
+ // SentinelMonitor response for shard02
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return nil },
+ },
+ // SentinelSet response for shard02
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return nil },
+ },
+ )),
+ args: args{
+ ctx: context.TODO(),
+ shards: testShardedCluster,
+ },
+ want: []string{"shard00", "shard01", "shard02"},
+ wantErr: false,
+ },
+ {
+ name: "All shards unmonitored, failure on the 2nd one",
+ ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port",
+ // SentinelMaster response for shard00 (returns error as it is unmonitored)
+ client.FakeResponse{
+ InjectResponse: func() interface{} { return &client.SentinelMasterCmdResult{} },
+ InjectError: func() error { return errors.New(shardNotInitializedError) },
+ },
+ // SentinelMonitor response for shard00
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return nil },
+ },
+ // SentinelSet response for shard00
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return nil },
+ },
+ // SentinelMaster response for shard01 (fails with a generic error)
+ client.FakeResponse{
+ InjectResponse: func() interface{} { return &client.SentinelMasterCmdResult{} },
+ InjectError: func() error { return errors.New("error") },
+ },
+ // SentinelMaster response for shard02 (returns error as it is unmonitored)
+ client.FakeResponse{
+ InjectResponse: func() interface{} { return &client.SentinelMasterCmdResult{} },
+ InjectError: func() error { return errors.New(shardNotInitializedError) },
+ },
+ // SentinelMonitor response for shard02
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return nil },
+ },
+ // SentinelSet response for shard02
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return nil },
+ },
+ )),
+ args: args{
+ ctx: context.TODO(),
+ shards: testShardedCluster,
+ },
+ want: []string{"shard00"},
+ wantErr: true,
+ },
+ {
+ name: "All shards monitored, failure on the 2nd one",
+ ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port",
+ // SentinelMaster response for shard00
+ client.FakeResponse{
+ InjectResponse: func() interface{} {
+ return &client.SentinelMasterCmdResult{
+ Name: "shard00",
+ IP: "127.0.0.1",
+ Port: 2000,
+ }
+ },
+ InjectError: func() error { return nil },
+ },
+ // SentinelMaster response for shard01 (fails with a generic error)
+ client.FakeResponse{
+ InjectResponse: func() interface{} { return &client.SentinelMasterCmdResult{} },
+ InjectError: func() error { return errors.New("error") },
+ },
+ )),
+ args: args{
+ ctx: context.TODO(),
+ shards: testShardedCluster,
+ },
+ want: []string{},
+ wantErr: true,
+ },
+ {
+ name: "'sentinel monitor' fails for shard00, returns no shards changed",
+ ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port",
+ // SentinelMaster response for shard00 (returns error as it is unmonitored)
+ client.FakeResponse{
+ InjectResponse: func() interface{} { return &client.SentinelMasterCmdResult{} },
+ InjectError: func() error { return errors.New(shardNotInitializedError) },
+ },
+ // SentinelMonitor response for shard00
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return errors.New("error") },
+ },
+ )),
+ args: args{
+ ctx: context.TODO(),
+ shards: testShardedCluster,
+ },
+ want: []string{},
+ wantErr: true,
+ },
+ {
+ name: "Error writing config param, returns shard00 changed",
+ ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port",
+ // SentinelMaster response for shard00 (returns error as it is unmonitored)
+ client.FakeResponse{
+ InjectResponse: func() interface{} { return &client.SentinelMasterCmdResult{} },
+ InjectError: func() error { return errors.New(shardNotInitializedError) },
+ },
+ // SentinelMonitor response for shard00
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return nil },
+ },
+ // SentinelSet response for shard00
+ client.FakeResponse{
+ InjectResponse: nil,
+ InjectError: func() error { return errors.New("error") },
+ },
+ )),
+ args: args{
+ ctx: context.TODO(),
+ shards: testShardedCluster,
+ },
+ want: []string{"shard00"},
+ wantErr: true,
+ },
+ {
+ name: "No master found, returns error, no shards changed",
+ ss: NewSentinelServerFromParams(redis.NewFakeServerWithFakeClient("host", "port",
+ // SentinelMaster response for shard00 (returns error as it is unmonitored)
+ client.FakeResponse{
+ InjectResponse: func() interface{} { return &client.SentinelMasterCmdResult{} },
+ InjectError: func() error { return errors.New(shardNotInitializedError) },
+ },
+ )),
+ args: args{
+ ctx: context.TODO(),
+ shards: &Cluster{
+ Shards: []*Shard{
+ {
+ Name: "shard00",
+ Servers: []*RedisServer{
+ NewRedisServerFromParams(
+ redis.MustNewServer("redis://127.0.0.1:2000", util.Pointer("shard00-0")),
+ client.Slave,
+ map[string]string{},
+ ),
+ NewRedisServerFromParams(
+ redis.MustNewServer("redis://127.0.0.1:2001", util.Pointer("shard00-1")),
+ client.Slave,
+ map[string]string{},
+ ),
+ NewRedisServerFromParams(
+ redis.MustNewServer("redis://127.0.0.1:2002", util.Pointer("shard00-2")),
+ client.Slave,
+ map[string]string{},
+ ),
+ },
+ },
+ },
+ },
+ },
+ want: []string{},
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := tt.ss.Monitor(tt.args.ctx, tt.args.shards)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("SentinelServer.Monitor() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("SentinelServer.Monitor() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/resource_builders/twemproxy/config.go b/pkg/resource_builders/twemproxy/config.go
index 8ea6b783..7960445d 100644
--- a/pkg/resource_builders/twemproxy/config.go
+++ b/pkg/resource_builders/twemproxy/config.go
@@ -14,11 +14,20 @@ const (
)

type Server struct {
+ alias string
Address string
Priority int
Name string
}

+func NewServer(hostport string, alias string) Server {
+ return Server{
+ alias: alias,
+ Address: hostport,
+ Priority: 1,
+ }
+}
+
func (srv *Server) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s:%d %s\"", srv.Address, srv.Priority, srv.Name)), nil
}
@@ -36,6 +45,13 @@ func (srv *Server) UnmarshalJSON(data []byte) error {
return nil
}

+func (srv *Server) Alias() string {
+ if srv.alias != "" {
+ return srv.alias
+ }
+ return srv.Address
+}
+
type ServerPoolConfig struct {
Listen string `json:"listen"`
Hash string `json:"hash,omitempty"`
diff --git a/pkg/resource_builders/twemproxy/config_test.go b/pkg/resource_builders/twemproxy/config_test.go
index c5968cb2..a872593c 100644
--- a/pkg/resource_builders/twemproxy/config_test.go
+++ b/pkg/resource_builders/twemproxy/config_test.go
@@ -5,6 +5,7 @@ import (
"testing"

"github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
)

func TestTwemproxyServer_MarshalJSON(t *testing.T) {
@@ -90,7 +91,7 @@ func TestTwemproxyServer_UnmarshalJSON(t *testing.T) {
}
return
} else {
- if diff := cmp.Diff(srv, tt.want); len(diff) != 0 {
+ if diff := cmp.Diff(srv, tt.want, cmpopts.IgnoreUnexported(Server{})); len(diff) != 0 {
t.Fatalf("TwemproxyServer.UnmarshalJSON() diff = %v", diff)
}
}
diff --git a/pkg/util/dns.go b/pkg/util/dns.go
new file mode 100644
index 00000000..c22bd21a
--- /dev/null
+++ b/pkg/util/dns.go
@@ -0,0 +1,26 @@
+package util
+
+import (
+ "context"
+ "net"
+)
+
+// LookupIPv4 resolves the given hostname to an IPv4 address.
+// WARNING: It only returns the first IP even if the hostname resolves to several
+func LookupIPv4(ctx context.Context, host string) (string, error) {
+ var ip string
+ if r := net.ParseIP(host); r != nil {
+ ip = r.String()
+ } else {
+ // if it is not an IP, try to resolve it
+ ips, err := net.DefaultResolver.LookupIP(ctx, "ip4", host)
+ if err != nil {
+ return "", err
+ }
+
+ // return only the first IP returned
+ ip = ips[0].String()
+ }
+
+ return ip, nil
+}
diff --git a/pkg/util/error.go b/pkg/util/error.go
index 1590eb5c..cf487264 100644
--- a/pkg/util/error.go
+++ b/pkg/util/error.go
@@ -30,3 +30,14 @@ func (me MultiError) Error() string {
b, _ := json.Marshal(list)
return string(b)
}
+
+func (me MultiError) Unwrap() []error {
+ return []error(me)
+}
+
+func (me MultiError) ErrorOrNil() error {
+ if len(me) > 0 {
+ return me
+ }
+ return nil
+}
diff --git a/pkg/util/pointer.go b/pkg/util/pointer.go
index 8c65bd4f..20781858 100644
--- a/pkg/util/pointer.go
+++ b/pkg/util/pointer.go
@@ -15,3 +15,7 @@ func Metav1DurationPtr(value time.Duration) *metav1.Duration {
d := &metav1.Duration{Duration: value}
return d
}
+
+func Pointer[T any](t T) *T {
+ return &t
+}
diff --git a/test/e2e/sentinel_suite_test.go b/test/e2e/sentinel_suite_test.go
index bcb33697..76fcace8 100644
--- a/test/e2e/sentinel_suite_test.go
+++ b/test/e2e/sentinel_suite_test.go
@@ -8,7 +8,7 @@ import (
"time"

saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1"
- redisclient "github.com/3scale/saas-operator/pkg/redis/crud/client"
+ redisclient "github.com/3scale/saas-operator/pkg/redis/client"
testutil "github.com/3scale/saas-operator/test/util"
"github.com/google/go-cmp/cmp"
.
"github.com/onsi/ginkgo/v2" @@ -66,6 +66,7 @@ var _ = Describe("sentinel e2e suite", func() { if shard.Status.ShardNodes != nil && shard.Status.ShardNodes.Master != nil { // store the resource for later use shards[i] = shard + GinkgoWriter.Printf("[debug] Shard %s topology: %+v\n", shard.GetName(), *shard.Status.ShardNodes) return nil } else { return fmt.Errorf("RedisShard %s not ready", shard.ObjectMeta.Name) @@ -84,14 +85,14 @@ var _ = Describe("sentinel e2e suite", func() { Config: &saasv1alpha1.SentinelConfig{ MonitoredShards: map[string][]string{ shards[0].GetName(): { - *shards[0].Status.ShardNodes.Master, - shards[0].Status.ShardNodes.Slaves[0], - shards[0].Status.ShardNodes.Slaves[1], + "redis://" + shards[0].Status.ShardNodes.GetHostPortByPodIndex(0), + "redis://" + shards[0].Status.ShardNodes.GetHostPortByPodIndex(1), + "redis://" + shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), }, shards[1].GetName(): { - *shards[1].Status.ShardNodes.Master, - shards[1].Status.ShardNodes.Slaves[0], - shards[1].Status.ShardNodes.Slaves[1], + "redis://" + shards[1].Status.ShardNodes.GetHostPortByPodIndex(0), + "redis://" + shards[1].Status.ShardNodes.GetHostPortByPodIndex(1), + "redis://" + shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), }, }, }, @@ -135,7 +136,7 @@ var _ = Describe("sentinel e2e suite", func() { for _, shard := range shards { found := false for _, master := range masters { - if strings.Contains(*shard.Status.ShardNodes.Master, master.IP) { + if strings.Contains(shard.Status.ShardNodes.MasterHostPort(), master.IP) { found = true break } @@ -156,30 +157,49 @@ var _ = Describe("sentinel e2e suite", func() { return err } - for i, shard := range shards { - - if diff := cmp.Diff(sentinel.Status.MonitoredShards[i], - saasv1alpha1.MonitoredShard{ - Name: shard.GetName(), - Master: "", - Servers: map[string]saasv1alpha1.RedisServerDetails{ - strings.TrimPrefix(*shard.Status.ShardNodes.Master, "redis://"): { - Role: redisclient.Master, - Config: map[string]string{"save": "900 1 300 10"}, - }, - strings.TrimPrefix(shard.Status.ShardNodes.Slaves[0], "redis://"): { - Role: redisclient.Slave, - Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, - }, - strings.TrimPrefix(shard.Status.ShardNodes.Slaves[1], "redis://"): { - Role: redisclient.Slave, - Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, - }, + if diff := cmp.Diff(sentinel.Status.MonitoredShards, saasv1alpha1.MonitoredShards{ + saasv1alpha1.MonitoredShard{ + Name: "rs0", + Servers: map[string]saasv1alpha1.RedisServerDetails{ + shards[0].Status.ShardNodes.GetHostPortByPodIndex(0): { + Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(0), + Role: redisclient.Master, + Config: map[string]string{"save": "900 1 300 10"}, + }, + shards[0].Status.ShardNodes.GetHostPortByPodIndex(1): { + Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(1), + Role: redisclient.Slave, + Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, + }, + shards[0].Status.ShardNodes.GetHostPortByPodIndex(2): { + Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), + Role: redisclient.Slave, + Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, }, }, - ); diff != "" { - return fmt.Errorf("got unexpected sentinel status %s", diff) - } + }, + saasv1alpha1.MonitoredShard{ + Name: "rs1", + Servers: map[string]saasv1alpha1.RedisServerDetails{ + shards[1].Status.ShardNodes.GetHostPortByPodIndex(0): { + Address: 
shards[1].Status.ShardNodes.GetHostPortByPodIndex(0), + Role: redisclient.Slave, + Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, + }, + shards[1].Status.ShardNodes.GetHostPortByPodIndex(1): { + Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(1), + Role: redisclient.Slave, + Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, + }, + shards[1].Status.ShardNodes.GetHostPortByPodIndex(2): { + Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), + Role: redisclient.Master, + Config: map[string]string{"save": "900 1 300 10"}, + }, + }, + }, + }); diff != "" { + return fmt.Errorf("got unexpected sentinel status %s", diff) } return nil @@ -262,28 +282,48 @@ var _ = Describe("sentinel e2e suite", func() { return err } - if diff := cmp.Diff(sentinel.Status.MonitoredShards[0], + if diff := cmp.Diff(sentinel.Status.MonitoredShards, saasv1alpha1.MonitoredShards{ + saasv1alpha1.MonitoredShard{ + Name: "rs0", + Servers: map[string]saasv1alpha1.RedisServerDetails{ + shards[0].Status.ShardNodes.GetHostPortByPodIndex(0): { + Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(0), + Role: redisclient.Slave, + Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, + }, + shards[0].Status.ShardNodes.GetHostPortByPodIndex(1): { + Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(1), + Role: redisclient.Master, + Config: map[string]string{"save": "900 1 300 10"}, + }, + shards[0].Status.ShardNodes.GetHostPortByPodIndex(2): { + Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), + Role: redisclient.Slave, + Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, + }, + }, + }, saasv1alpha1.MonitoredShard{ - Name: shards[0].GetName(), - Master: "", + Name: "rs1", Servers: map[string]saasv1alpha1.RedisServerDetails{ - // old master is now a slave - strings.TrimPrefix(*shards[0].Status.ShardNodes.Master, "redis://"): { - Role: redisclient.Slave, - Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, + shards[1].Status.ShardNodes.GetHostPortByPodIndex(0): { + Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(0), + Role: redisclient.Slave, + Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, }, - // first slave is now the master - strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[0], "redis://"): { - Role: redisclient.Master, - Config: map[string]string{"save": "900 1 300 10"}, + shards[1].Status.ShardNodes.GetHostPortByPodIndex(1): { + Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(1), + Role: redisclient.Slave, + Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, }, - strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[1], "redis://"): { - Role: redisclient.Slave, - Config: map[string]string{"save": "900 1 300 10", "slave-read-only": "yes"}, + shards[1].Status.ShardNodes.GetHostPortByPodIndex(2): { + Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), + Role: redisclient.Master, + Config: map[string]string{"save": "900 1 300 10"}, }, }, }, - ); diff != "" { + }); diff != "" { return fmt.Errorf("got unexpected sentinel status %s", diff) } diff --git a/test/e2e/twemproxyconfig_suite_test.go b/test/e2e/twemproxyconfig_suite_test.go index d1832542..f3360bba 100644 --- a/test/e2e/twemproxyconfig_suite_test.go +++ b/test/e2e/twemproxyconfig_suite_test.go @@ -2,16 +2,16 @@ package e2e import ( "context" - "encoding/json" "fmt" "sort" - "strings" "time" saasv1alpha1 
"github.com/3scale/saas-operator/api/v1alpha1" "github.com/3scale/saas-operator/pkg/resource_builders/twemproxy" + "github.com/3scale/saas-operator/pkg/util" testutil "github.com/3scale/saas-operator/test/util" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -41,9 +41,8 @@ var _ = Describe("twemproxyconfig e2e suite", func() { err := k8sClient.Create(context.Background(), testNamespace) Expect(err).ToNot(HaveOccurred()) - n := &corev1.Namespace{} Eventually(func() error { - return k8sClient.Get(context.Background(), types.NamespacedName{Name: ns}, n) + return k8sClient.Get(context.Background(), client.ObjectKeyFromObject(testNamespace), testNamespace) }, timeout, poll).ShouldNot(HaveOccurred()) shards = []saasv1alpha1.RedisShard{ @@ -62,7 +61,7 @@ var _ = Describe("twemproxyconfig e2e suite", func() { Expect(err).ToNot(HaveOccurred()) Eventually(func() error { - err := k8sClient.Get(context.Background(), types.NamespacedName{Name: shard.GetName(), Namespace: ns}, &shard) + err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(&shard), &shard) if err != nil { return err } @@ -81,16 +80,16 @@ var _ = Describe("twemproxyconfig e2e suite", func() { ObjectMeta: metav1.ObjectMeta{Name: "sentinel", Namespace: ns}, Spec: saasv1alpha1.SentinelSpec{ Config: &saasv1alpha1.SentinelConfig{ - MonitoredShards: map[string][]string{ + ClusterTopology: map[string]map[string]string{ shards[0].GetName(): { - *shards[0].Status.ShardNodes.Master, - shards[0].Status.ShardNodes.Slaves[0], - shards[0].Status.ShardNodes.Slaves[1], + "redis-shard-rs0-0": "redis://" + shards[0].Status.ShardNodes.GetHostPortByPodIndex(0), // master + "redis-shard-rs0-1": "redis://" + shards[0].Status.ShardNodes.GetHostPortByPodIndex(1), + "redis-shard-rs0-2": "redis://" + shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), }, shards[1].GetName(): { - *shards[1].Status.ShardNodes.Master, - shards[1].Status.ShardNodes.Slaves[0], - shards[1].Status.ShardNodes.Slaves[1], + "redis-shard-rs1-0": "redis://" + shards[1].Status.ShardNodes.GetHostPortByPodIndex(0), // master + "redis-shard-rs1-1": "redis://" + shards[1].Status.ShardNodes.GetHostPortByPodIndex(1), + "redis-shard-rs1-2": "redis://" + shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), }, }, }, @@ -102,7 +101,7 @@ var _ = Describe("twemproxyconfig e2e suite", func() { Eventually(func() error { - err := k8sClient.Get(context.Background(), types.NamespacedName{Name: sentinel.GetName(), Namespace: ns}, &sentinel) + err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(&sentinel), &sentinel) Expect(err).ToNot(HaveOccurred()) if len(sentinel.Status.MonitoredShards) != len(shards) { @@ -117,57 +116,69 @@ var _ = Describe("twemproxyconfig e2e suite", func() { BeforeEach(func() { - twemproxyconfig = saasv1alpha1.TwemproxyConfig{ - ObjectMeta: metav1.ObjectMeta{Name: "tmc-instance", Namespace: ns}, - Spec: saasv1alpha1.TwemproxyConfigSpec{ - ServerPools: []saasv1alpha1.TwemproxyServerPool{{ - Name: "test-pool", - Topology: []saasv1alpha1.ShardedRedisTopology{ - {ShardName: "l-shard00", PhysicalShard: shards[0].GetName()}, - {ShardName: "l-shard01", PhysicalShard: shards[0].GetName()}, - {ShardName: "l-shard02", PhysicalShard: shards[0].GetName()}, - {ShardName: "l-shard03", PhysicalShard: shards[1].GetName()}, - {ShardName: "l-shard04", PhysicalShard: shards[1].GetName()}, - }, - BindAddress: "0.0.0.0:22121", - Timeout: 5000, - TCPBacklog: 512, 
- PreConnect: false,
- }},
- GrafanaDashboard: &saasv1alpha1.GrafanaDashboardSpec{},
- },
- }
+ By("creating the TwemproxyConfig resource targeting masters", func() {
+ twemproxyconfig = saasv1alpha1.TwemproxyConfig{
+ ObjectMeta: metav1.ObjectMeta{Name: "tmc-instance", Namespace: ns},
+ Spec: saasv1alpha1.TwemproxyConfigSpec{
+ ServerPools: []saasv1alpha1.TwemproxyServerPool{{
+ Name: "test-pool",
+ Topology: []saasv1alpha1.ShardedRedisTopology{
+ {ShardName: "l-shard00", PhysicalShard: shards[0].GetName()},
+ {ShardName: "l-shard01", PhysicalShard: shards[0].GetName()},
+ {ShardName: "l-shard02", PhysicalShard: shards[0].GetName()},
+ {ShardName: "l-shard03", PhysicalShard: shards[1].GetName()},
+ {ShardName: "l-shard04", PhysicalShard: shards[1].GetName()},
+ },
+ BindAddress: "0.0.0.0:22121",
+ Timeout: 5000,
+ TCPBacklog: 512,
+ PreConnect: false,
+ }},
+ GrafanaDashboard: &saasv1alpha1.GrafanaDashboardSpec{},
+ },
+ }

- err := k8sClient.Create(context.Background(), &twemproxyconfig)
- Expect(err).ToNot(HaveOccurred())
+ err := k8sClient.Create(context.Background(), &twemproxyconfig)
+ Expect(err).ToNot(HaveOccurred())
+ })

- Eventually(func() error {
- return k8sClient.Get(context.Background(), types.NamespacedName{Name: twemproxyconfig.GetName(), Namespace: ns}, &twemproxyconfig)
- }, timeout, poll).ShouldNot(HaveOccurred())
+ By("waiting until the TwemproxyConfig resource is ready", func() {
+ Eventually(func() error {
+ if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(&twemproxyconfig), &twemproxyconfig); err != nil {
+ return err
+ }
+ if twemproxyconfig.Status.SelectedTargets == nil {
+ return fmt.Errorf("status.selectedTargets is empty")
+ }
+ return nil
+ }, timeout, poll).ShouldNot(HaveOccurred())
+ })
})

It("deploys a ConfigMap with twemproxy configuration that points to redis masters", func() {
+ Eventually(assertTwemproxyConfigStatus(&twemproxyconfig, &sentinel,
+ &saasv1alpha1.TwemproxyConfigStatus{
+ SelectedTargets: map[string]saasv1alpha1.TargetServer{
+ shards[0].GetName(): {
+ ServerAlias: util.Pointer(shards[0].Status.ShardNodes.GetAliasByPodIndex(0)),
+ ServerAddress: shards[0].Status.ShardNodes.GetHostPortByPodIndex(0),
+ },
+ shards[1].GetName(): {
+ ServerAlias: util.Pointer(shards[1].Status.ShardNodes.GetAliasByPodIndex(0)),
+ ServerAddress: shards[1].Status.ShardNodes.GetHostPortByPodIndex(0),
+ },
+ },
+ }), timeout, poll).Should(Not(HaveOccurred()))

- cm := &corev1.ConfigMap{}
- By("getting the generated ConfigMap",
- (&testutil.ExpectedResource{Name: "tmc-instance", Namespace: ns}).
- Assert(k8sClient, cm, timeout, poll)) - - config := map[string]twemproxy.ServerPoolConfig{} - data, err := yaml.YAMLToJSON([]byte(cm.Data["nutcracker.yml"])) - Expect(err).ToNot(HaveOccurred()) - err = json.Unmarshal(data, &config) - Expect(err).ToNot(HaveOccurred()) - - Expect(config["test-pool"].Servers).To(Equal( + Eventually(assertTwemproxyConfigServerPool(&twemproxyconfig, []twemproxy.Server{ - {Address: strings.TrimPrefix(*shards[0].Status.ShardNodes.Master, "redis://"), Priority: 1, Name: "l-shard00"}, - {Address: strings.TrimPrefix(*shards[0].Status.ShardNodes.Master, "redis://"), Priority: 1, Name: "l-shard01"}, - {Address: strings.TrimPrefix(*shards[0].Status.ShardNodes.Master, "redis://"), Priority: 1, Name: "l-shard02"}, - {Address: strings.TrimPrefix(*shards[1].Status.ShardNodes.Master, "redis://"), Priority: 1, Name: "l-shard03"}, - {Address: strings.TrimPrefix(*shards[1].Status.ShardNodes.Master, "redis://"), Priority: 1, Name: "l-shard04"}, - }, - )) + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(0), Priority: 1, Name: "l-shard00"}, + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(0), Priority: 1, Name: "l-shard01"}, + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(0), Priority: 1, Name: "l-shard02"}, + {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(0), Priority: 1, Name: "l-shard03"}, + {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(0), Priority: 1, Name: "l-shard04"}, + }), timeout, poll).Should(Not(HaveOccurred())) + }) When("a redis master is unavailable", func() { @@ -215,39 +226,30 @@ var _ = Describe("twemproxyconfig e2e suite", func() { }) - It("updates the twemproxy configuration with new master", func() { - Eventually(func() error { - - cm := &corev1.ConfigMap{} - err := k8sClient.Get(context.Background(), types.NamespacedName{Name: twemproxyconfig.GetName(), Namespace: ns}, cm) - if err != nil { - return err - } - - config := map[string]twemproxy.ServerPoolConfig{} - data, err := yaml.YAMLToJSON([]byte(cm.Data["nutcracker.yml"])) - if err != nil { - return err - } - err = json.Unmarshal(data, &config) - if err != nil { - return err - } + It("updates the twemproxy configuration with the new master", func() { - if diff := cmp.Diff(config["test-pool"].Servers, - []twemproxy.Server{ - {Address: strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[0], "redis://"), Priority: 1, Name: "l-shard00"}, - {Address: strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[0], "redis://"), Priority: 1, Name: "l-shard01"}, - {Address: strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[0], "redis://"), Priority: 1, Name: "l-shard02"}, - {Address: strings.TrimPrefix(*shards[1].Status.ShardNodes.Master, "redis://"), Priority: 1, Name: "l-shard03"}, - {Address: strings.TrimPrefix(*shards[1].Status.ShardNodes.Master, "redis://"), Priority: 1, Name: "l-shard04"}, + Eventually(assertTwemproxyConfigStatus(&twemproxyconfig, &sentinel, + &saasv1alpha1.TwemproxyConfigStatus{ + SelectedTargets: map[string]saasv1alpha1.TargetServer{ + shards[0].GetName(): { + ServerAlias: util.Pointer(shards[0].Status.ShardNodes.GetAliasByPodIndex(1)), + ServerAddress: shards[0].Status.ShardNodes.GetHostPortByPodIndex(1), + }, + shards[1].GetName(): { + ServerAlias: util.Pointer(shards[1].Status.ShardNodes.GetAliasByPodIndex(0)), + ServerAddress: shards[1].Status.ShardNodes.GetHostPortByPodIndex(0), + }, }, - ); diff != "" { - return fmt.Errorf("got unexpected pool servers %s", diff) - } - - return nil - }, timeout, 
poll).ShouldNot(HaveOccurred())
+ }), timeout, poll).Should(Not(HaveOccurred()))
+
+ Eventually(assertTwemproxyConfigServerPool(&twemproxyconfig,
+ []twemproxy.Server{
+ {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(1), Priority: 1, Name: "l-shard00"},
+ {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(1), Priority: 1, Name: "l-shard01"},
+ {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(1), Priority: 1, Name: "l-shard02"},
+ {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(0), Priority: 1, Name: "l-shard03"},
+ {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(0), Priority: 1, Name: "l-shard04"},
+ }), timeout, poll).Should(Not(HaveOccurred()))
})
})

@@ -260,7 +262,6 @@ var _ = Describe("twemproxyconfig e2e suite", func() {
})

When("TwemproxyConfig resource is created targeting redis rw-slaves", func() {
- cm := &corev1.ConfigMap{}

BeforeEach(func() {
@@ -285,7 +286,7 @@ var _ = Describe("twemproxyconfig e2e suite", func() {
})

- By("creating the TwemproxyConfig resource pointing to rw-slaves", func() {
+ By("creating the TwemproxyConfig resource targeting rw-slaves", func() {
twemproxyconfig = saasv1alpha1.TwemproxyConfig{
ObjectMeta: metav1.ObjectMeta{Name: "tmc-instance", Namespace: ns},
Spec: saasv1alpha1.TwemproxyConfigSpec{
@@ -310,35 +311,45 @@ var _ = Describe("twemproxyconfig e2e suite", func() {
err := k8sClient.Create(context.Background(), &twemproxyconfig)
Expect(err).ToNot(HaveOccurred())
+ })

+ By("waiting until the TwemproxyConfig resource is ready", func() {
Eventually(func() error {
- return k8sClient.Get(context.Background(), types.NamespacedName{Name: twemproxyconfig.GetName(), Namespace: ns}, &twemproxyconfig)
+ if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(&twemproxyconfig), &twemproxyconfig); err != nil {
+ return err
+ }
+ if twemproxyconfig.Status.SelectedTargets == nil {
+ return fmt.Errorf("status.selectedTargets is empty")
+ }
+ return nil
}, timeout, poll).ShouldNot(HaveOccurred())
-
- By("getting the generated ConfigMap",
- (&testutil.ExpectedResource{Name: "tmc-instance", Namespace: ns}).
- Assert(k8sClient, cm, timeout, poll)) }) - }) It("deploys a ConfigMap with twemproxy configuration that points to redis rw-slaves", func() { - config := map[string]twemproxy.ServerPoolConfig{} - data, err := yaml.YAMLToJSON([]byte(cm.Data["nutcracker.yml"])) - Expect(err).ToNot(HaveOccurred()) - err = json.Unmarshal(data, &config) - Expect(err).ToNot(HaveOccurred()) + Eventually(assertTwemproxyConfigStatus(&twemproxyconfig, &sentinel, + &saasv1alpha1.TwemproxyConfigStatus{ + SelectedTargets: map[string]saasv1alpha1.TargetServer{ + shards[0].GetName(): { + ServerAlias: util.Pointer(shards[0].Status.ShardNodes.GetAliasByPodIndex(2)), + ServerAddress: shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), + }, + shards[1].GetName(): { + ServerAlias: util.Pointer(shards[1].Status.ShardNodes.GetAliasByPodIndex(2)), + ServerAddress: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), + }, + }, + }), timeout, poll).Should(Not(HaveOccurred())) - Expect(config["test-pool"].Servers).To(Equal( + Eventually(assertTwemproxyConfigServerPool(&twemproxyconfig, []twemproxy.Server{ - {Address: strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard00"}, - {Address: strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard01"}, - {Address: strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard02"}, - {Address: strings.TrimPrefix(shards[1].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard03"}, - {Address: strings.TrimPrefix(shards[1].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard04"}, - }, - )) + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard00"}, + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard01"}, + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard02"}, + {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard03"}, + {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard04"}, + }), timeout, poll).Should(Not(HaveOccurred())) }) When("there are no rw-slaves available in a shard it failovers to the master", func() { @@ -367,55 +378,54 @@ var _ = Describe("twemproxyconfig e2e suite", func() { By("checking the config for rs0 points to master", func() { - Eventually(func() []twemproxy.Server { - - By("getting the ConfigMap", - (&testutil.ExpectedResource{Name: "tmc-instance", Namespace: ns}). 
- Assert(k8sClient, cm, timeout, poll)) - - config := map[string]twemproxy.ServerPoolConfig{} - data, err := yaml.YAMLToJSON([]byte(cm.Data["nutcracker.yml"])) - Expect(err).ToNot(HaveOccurred()) - err = json.Unmarshal(data, &config) - Expect(err).ToNot(HaveOccurred()) - return config["test-pool"].Servers + Eventually(assertTwemproxyConfigStatus(&twemproxyconfig, &sentinel, + &saasv1alpha1.TwemproxyConfigStatus{ + SelectedTargets: map[string]saasv1alpha1.TargetServer{ + shards[0].GetName(): { + ServerAlias: util.Pointer(shards[0].Status.ShardNodes.GetAliasByPodIndex(0)), + ServerAddress: shards[0].Status.ShardNodes.GetHostPortByPodIndex(0), + }, + shards[1].GetName(): { + ServerAlias: util.Pointer(shards[1].Status.ShardNodes.GetAliasByPodIndex(2)), + ServerAddress: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), + }, + }, + }), timeout, poll).Should(Not(HaveOccurred())) - }, timeout, poll).Should(Equal( + Eventually(assertTwemproxyConfigServerPool(&twemproxyconfig, []twemproxy.Server{ - {Address: strings.TrimPrefix(*shards[0].Status.ShardNodes.Master, "redis://"), Priority: 1, Name: "l-shard00"}, - {Address: strings.TrimPrefix(*shards[0].Status.ShardNodes.Master, "redis://"), Priority: 1, Name: "l-shard01"}, - {Address: strings.TrimPrefix(*shards[0].Status.ShardNodes.Master, "redis://"), Priority: 1, Name: "l-shard02"}, - {Address: strings.TrimPrefix(shards[1].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard03"}, - {Address: strings.TrimPrefix(shards[1].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard04"}, - }, - )) - + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(0), Priority: 1, Name: "l-shard00"}, + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(0), Priority: 1, Name: "l-shard01"}, + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(0), Priority: 1, Name: "l-shard02"}, + {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard03"}, + {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard04"}, + }), timeout, poll).Should(Not(HaveOccurred())) }) By("checking the config for rs0 points back to rw-slave once it's recovered", func() { - Eventually(func() []twemproxy.Server { - - By("getting the ConfigMap", - (&testutil.ExpectedResource{Name: "tmc-instance", Namespace: ns}). 
- Assert(k8sClient, cm, timeout, poll)) - - config := map[string]twemproxy.ServerPoolConfig{} - data, err := yaml.YAMLToJSON([]byte(cm.Data["nutcracker.yml"])) - Expect(err).ToNot(HaveOccurred()) - err = json.Unmarshal(data, &config) - Expect(err).ToNot(HaveOccurred()) - return config["test-pool"].Servers + Eventually(assertTwemproxyConfigStatus(&twemproxyconfig, &sentinel, + &saasv1alpha1.TwemproxyConfigStatus{ + SelectedTargets: map[string]saasv1alpha1.TargetServer{ + shards[0].GetName(): { + ServerAlias: util.Pointer(shards[0].Status.ShardNodes.GetAliasByPodIndex(2)), + ServerAddress: shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), + }, + shards[1].GetName(): { + ServerAlias: util.Pointer(shards[1].Status.ShardNodes.GetAliasByPodIndex(2)), + ServerAddress: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), + }, + }, + }), timeout, poll).Should(Not(HaveOccurred())) - }, timeout, poll).Should(Equal( + Eventually(assertTwemproxyConfigServerPool(&twemproxyconfig, []twemproxy.Server{ - {Address: strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard00"}, - {Address: strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard01"}, - {Address: strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard02"}, - {Address: strings.TrimPrefix(shards[1].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard03"}, - {Address: strings.TrimPrefix(shards[1].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard04"}, - }, - )) + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard00"}, + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard01"}, + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard02"}, + {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard03"}, + {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard04"}, + }), timeout, poll).Should(Not(HaveOccurred())) }) @@ -444,40 +454,40 @@ var _ = Describe("twemproxyconfig e2e suite", func() { }) }) - It("reconfigures shard rs0 to point to the first slave in alphabetical order (by address)", func() { + It("reconfigures shard rs0 to point to the first slave in alphabetical order (by hostport)", func() { By("checking the twemproxy config for shard rs0", func() { // determine which should be the chosen rw-slave addresses := []string{ - strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[0], "redis://"), - strings.TrimPrefix(shards[0].Status.ShardNodes.Slaves[1], "redis://"), + shards[0].Status.ShardNodes.GetHostPortByPodIndex(1), + shards[0].Status.ShardNodes.GetHostPortByPodIndex(2), } sort.Strings(addresses) - expectedRWSlave := addresses[0] - - Eventually(func() []twemproxy.Server { - - By("getting the ConfigMap", - (&testutil.ExpectedResource{Name: "tmc-instance", Namespace: ns}). 
- Assert(k8sClient, cm, timeout, poll)) - - config := map[string]twemproxy.ServerPoolConfig{} - data, err := yaml.YAMLToJSON([]byte(cm.Data["nutcracker.yml"])) - Expect(err).ToNot(HaveOccurred()) - err = json.Unmarshal(data, &config) - Expect(err).ToNot(HaveOccurred()) - return config["test-pool"].Servers + idx := shards[0].Status.ShardNodes.GetIndexByHostPort(addresses[0]) + + Eventually(assertTwemproxyConfigStatus(&twemproxyconfig, &sentinel, + &saasv1alpha1.TwemproxyConfigStatus{ + SelectedTargets: map[string]saasv1alpha1.TargetServer{ + shards[0].GetName(): { + ServerAlias: util.Pointer(shards[0].Status.ShardNodes.GetAliasByPodIndex(idx)), + ServerAddress: shards[0].Status.ShardNodes.GetHostPortByPodIndex(idx), + }, + shards[1].GetName(): { + ServerAlias: util.Pointer(shards[1].Status.ShardNodes.GetAliasByPodIndex(2)), + ServerAddress: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), + }, + }, + }), timeout, poll).Should(Not(HaveOccurred())) - }, timeout, poll).Should(Equal( + Eventually(assertTwemproxyConfigServerPool(&twemproxyconfig, []twemproxy.Server{ - {Address: expectedRWSlave, Priority: 1, Name: "l-shard00"}, - {Address: expectedRWSlave, Priority: 1, Name: "l-shard01"}, - {Address: expectedRWSlave, Priority: 1, Name: "l-shard02"}, - {Address: strings.TrimPrefix(shards[1].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard03"}, - {Address: strings.TrimPrefix(shards[1].Status.ShardNodes.Slaves[1], "redis://"), Priority: 1, Name: "l-shard04"}, - }, - )) + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(idx), Priority: 1, Name: "l-shard00"}, + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(idx), Priority: 1, Name: "l-shard01"}, + {Address: shards[0].Status.ShardNodes.GetHostPortByPodIndex(idx), Priority: 1, Name: "l-shard02"}, + {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard03"}, + {Address: shards[1].Status.ShardNodes.GetHostPortByPodIndex(2), Priority: 1, Name: "l-shard04"}, + }), timeout, poll).Should(Not(HaveOccurred())) }) @@ -504,3 +514,49 @@ var _ = Describe("twemproxyconfig e2e suite", func() { }) }) + +func assertTwemproxyConfigStatus(tmc *saasv1alpha1.TwemproxyConfig, sentinel *saasv1alpha1.Sentinel, + want *saasv1alpha1.TwemproxyConfigStatus) func() error { + + return func() error { + if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(tmc), tmc); err != nil { + return err + } + + if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(sentinel), sentinel); err != nil { + return err + } + + monitoredShards, _ := yaml.Marshal(sentinel.Status.MonitoredShards) + GinkgoWriter.Printf("[debug] cluster status:\n\n %s\n", monitoredShards) + selectedTargets, _ := yaml.Marshal(tmc.Status.SelectedTargets) + GinkgoWriter.Printf("[debug] selected targets:\n\n %s\n", selectedTargets) + + if diff := cmp.Diff(*want, tmc.Status); diff != "" { + return fmt.Errorf("got unexpected status %s", diff) + } + + return nil + } +} + +func assertTwemproxyConfigServerPool(tmc *saasv1alpha1.TwemproxyConfig, want []twemproxy.Server) func() error { + + return func() error { + cm := &corev1.ConfigMap{} + if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(tmc), cm); err != nil { + return err + } + + config := map[string]twemproxy.ServerPoolConfig{} + if err := yaml.Unmarshal([]byte(cm.Data["nutcracker.yml"]), &config); err != nil { + return err + } + + if diff := cmp.Diff(want, config["test-pool"].Servers, cmpopts.IgnoreUnexported(twemproxy.Server{})); 
diff != "" { + return fmt.Errorf("got unexpected pool servers %s", diff) + } + + return nil + } +} diff --git a/test/util/redis.go b/test/util/redis.go index 0da2fcde..cad1a0a8 100644 --- a/test/util/redis.go +++ b/test/util/redis.go @@ -4,36 +4,35 @@ import ( "fmt" saasv1alpha1 "github.com/3scale/saas-operator/api/v1alpha1" - "github.com/3scale/saas-operator/pkg/redis" - "github.com/3scale/saas-operator/pkg/redis/crud" + redis "github.com/3scale/saas-operator/pkg/redis/server" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" ) -func RedisClient(cfg *rest.Config, podKey types.NamespacedName) (*crud.CRUD, chan struct{}, error) { +func RedisClient(cfg *rest.Config, podKey types.NamespacedName) (*redis.Server, chan struct{}, error) { localPort, stopCh, err := PortForward(cfg, podKey, 6379) if err != nil { return nil, nil, err } - rs, err := redis.NewRedisServerFromConnectionString("", fmt.Sprintf("redis://localhost:%d", localPort)) + rs, err := redis.NewServer(fmt.Sprintf("redis://localhost:%d", localPort), nil) if err != nil { return nil, nil, err } - return rs.CRUD, stopCh, nil + return rs, stopCh, nil } -func SentinelClient(cfg *rest.Config, podKey types.NamespacedName) (*crud.CRUD, chan struct{}, error) { +func SentinelClient(cfg *rest.Config, podKey types.NamespacedName) (*redis.Server, chan struct{}, error) { localPort, stopCh, err := PortForward(cfg, podKey, saasv1alpha1.SentinelPort) if err != nil { return nil, nil, err } - ss, err := redis.NewSentinelServerFromConnectionString("", fmt.Sprintf("redis://localhost:%d", localPort)) + ss, err := redis.NewServer(fmt.Sprintf("redis://localhost:%d", localPort), nil) if err != nil { return nil, nil, err } - return ss.CRUD, stopCh, nil + return ss, stopCh, nil }