diff --git a/Makefile b/Makefile index 07dacd7..3c95da1 100644 --- a/Makefile +++ b/Makefile @@ -23,9 +23,7 @@ teardown: @bash scripts/teardown.sh test: - - cd scripts/spanner && docker-compose up --force-recreate -d && cd ../.. @sh scripts/run-test.sh - - cd scripts/spanner && docker-compose down && cd ../.. lint: @rm -rf lint.log diff --git a/config/config.go b/config/config.go index 7db4f1d..828bd2b 100644 --- a/config/config.go +++ b/config/config.go @@ -85,16 +85,6 @@ type RedisConf struct { MasterName string Version string SentinelPassword string - - EnableSecondaryStorage bool - - // number of seconds. when job's delay second is greater than pumpStorageThresh, - // it will be written to storage if enabled - SecondaryStorageThresholdSeconds int64 -} - -func (c *Config) HasSecondaryStorage() bool { - return c.SecondaryStorage != nil } func (rc *RedisConf) validate() error { @@ -104,9 +94,6 @@ func (rc *RedisConf) validate() error { if rc.DB < 0 { return errors.New("the pool db must be greater than 0 or equal to 0") } - if rc.EnableSecondaryStorage && rc.SecondaryStorageThresholdSeconds < minSecondaryStorageThresholdSeconds { - return errors.New("write to secondary storage threshold required at least 1 hour") - } return nil } diff --git a/config/config_test.go b/config/config_test.go index 8c03607..e1cb661 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -33,10 +33,4 @@ func TestRedisConfig_Validate(t *testing.T) { if err := conf.validate(); err != nil { t.Fatalf("no error was expected, but got %v", err) } - - conf.EnableSecondaryStorage = true - conf.SecondaryStorageThresholdSeconds = 10 - if err := conf.validate(); err == nil { - t.Fatalf("validate addr error was expected, but got nil") - } } diff --git a/engine/job.go b/engine/job.go index afe6a75..d88dd01 100644 --- a/engine/job.go +++ b/engine/job.go @@ -5,9 +5,7 @@ import ( "encoding/binary" "encoding/json" "errors" - "google.golang.org/protobuf/proto" - "github.com/bitleak/lmstfy/engine/model" "github.com/bitleak/lmstfy/uuid" ) @@ -51,26 +49,6 @@ type jobImpl struct { _elapsedMS int64 } -// NewJobFromReq creates a new job with its body and attributes being marshalled -func NewJobFromReq(req *CreateJobReq) Job { - if req.ID == "" { - req.ID = uuid.GenUniqueJobIDWithDelay(req.Delay) - } - jobData, err := marshalJobBody(req.Body, req.Attributes) - if err != nil { - return &jobImpl{} - } - return &jobImpl{ - namespace: req.Namespace, - queue: req.Queue, - id: req.ID, - body: jobData, - ttl: req.TTL, - delay: req.Delay, - tries: req.Tries, - } -} - // NOTE: there is a trick in this factory, the delay is embedded in the jobID. // By doing this we can delete the job that's located in hourly AOF, by placing // a tombstone record in that AOF. @@ -229,11 +207,3 @@ func (j *jobImpl) MarshalText() (text []byte, err error) { func (j *jobImpl) GetDelayHour() uint16 { return 0 } - -func marshalJobBody(body []byte, attrs map[string]string) ([]byte, error) { - job := &model.JobData{ - Data: body, - Attributes: attrs, - } - return proto.Marshal(job) -} diff --git a/engine/model/job.pb.go b/engine/model/job.pb.go deleted file mode 100644 index ae6426c..0000000 --- a/engine/model/job.pb.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 -// source: engine/redis_v2/model/job.proto - -package model - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type JobData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *JobData) Reset() { - *x = JobData{} - if protoimpl.UnsafeEnabled { - mi := &file_engine_redis_v2_model_job_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *JobData) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*JobData) ProtoMessage() {} - -func (x *JobData) ProtoReflect() protoreflect.Message { - mi := &file_engine_redis_v2_model_job_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use JobData.ProtoReflect.Descriptor instead. -func (*JobData) Descriptor() ([]byte, []int) { - return file_engine_redis_v2_model_job_proto_rawDescGZIP(), []int{0} -} - -func (x *JobData) GetData() []byte { - if x != nil { - return x.Data - } - return nil -} - -func (x *JobData) GetAttributes() map[string]string { - if x != nil { - return x.Attributes - } - return nil -} - -var File_engine_redis_v2_model_job_proto protoreflect.FileDescriptor - -var file_engine_redis_v2_model_job_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x72, 0x65, 0x64, 0x69, 0x73, 0x5f, 0x76, - 0x32, 0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x6a, 0x6f, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x05, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x22, 0x9c, 0x01, 0x0a, 0x07, 0x4a, 0x6f, 0x62, - 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3e, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, - 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x4a, 0x6f, 0x62, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x6d, 0x73, 0x74, 0x66, 0x79, 0x2f, 0x65, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x2f, 
0x72, 0x65, 0x64, 0x69, 0x73, 0x5f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x64, - 0x65, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_engine_redis_v2_model_job_proto_rawDescOnce sync.Once - file_engine_redis_v2_model_job_proto_rawDescData = file_engine_redis_v2_model_job_proto_rawDesc -) - -func file_engine_redis_v2_model_job_proto_rawDescGZIP() []byte { - file_engine_redis_v2_model_job_proto_rawDescOnce.Do(func() { - file_engine_redis_v2_model_job_proto_rawDescData = protoimpl.X.CompressGZIP(file_engine_redis_v2_model_job_proto_rawDescData) - }) - return file_engine_redis_v2_model_job_proto_rawDescData -} - -var file_engine_redis_v2_model_job_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_engine_redis_v2_model_job_proto_goTypes = []interface{}{ - (*JobData)(nil), // 0: model.JobData - nil, // 1: model.JobData.AttributesEntry -} -var file_engine_redis_v2_model_job_proto_depIdxs = []int32{ - 1, // 0: model.JobData.attributes:type_name -> model.JobData.AttributesEntry - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_engine_redis_v2_model_job_proto_init() } -func file_engine_redis_v2_model_job_proto_init() { - if File_engine_redis_v2_model_job_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_engine_redis_v2_model_job_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*JobData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_engine_redis_v2_model_job_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_engine_redis_v2_model_job_proto_goTypes, - DependencyIndexes: file_engine_redis_v2_model_job_proto_depIdxs, - MessageInfos: file_engine_redis_v2_model_job_proto_msgTypes, - }.Build() - File_engine_redis_v2_model_job_proto = out.File - file_engine_redis_v2_model_job_proto_rawDesc = nil - file_engine_redis_v2_model_job_proto_goTypes = nil - file_engine_redis_v2_model_job_proto_depIdxs = nil -} diff --git a/engine/redis_v2/constant.go b/engine/redis_v2/constant.go deleted file mode 100644 index e36ab6e..0000000 --- a/engine/redis_v2/constant.go +++ /dev/null @@ -1,10 +0,0 @@ -package redis_v2 - -const ( - PoolPrefix = "j2" - QueuePrefix = "q2" - DeadLetterPrefix = "d2" - MetaPrefix = "m2" - - BatchSize = int64(100) -) diff --git a/engine/redis_v2/deadletter.go b/engine/redis_v2/deadletter.go deleted file mode 100644 index 13db515..0000000 --- a/engine/redis_v2/deadletter.go +++ /dev/null @@ -1,253 +0,0 @@ -package redis_v2 - -import ( - "errors" - "fmt" - "time" - - "github.com/bitleak/lmstfy/engine" - - go_redis "github.com/go-redis/redis/v8" -) - -const ( - luaRespawnDeadletterScript = ` -local deadletter = KEYS[1] -local queue = KEYS[2] -local poolPrefix = KEYS[3] -local limit = tonumber(ARGV[1]) -local respawnTTL = tonumber(ARGV[2]) - -for i = 1, limit do - local data = redis.call("RPOPLPUSH", deadletter, queue) - if data == false then - return i - 1 -- deadletter is empty - end - -- unpack the jobID, and set the TTL - local _, jobID = struct.unpack("HHc0", data) - if respawnTTL > 0 then 
- redis.call("EXPIRE", poolPrefix .. "/" .. jobID, respawnTTL) - end -end -return limit -- deadletter has more data when return value is >= limit -` - luaDeleteDeadletterScript = ` -local deadletter = KEYS[1] -local poolPrefix = KEYS[2] -local limit = tonumber(ARGV[1]) - -for i = 1, limit do - local data = redis.call("RPOP", deadletter) - if data == false then - return i - 1 - end - -- unpack the jobID, and delete the job from the job pool - local _, jobID = struct.unpack("HHc0", data) - redis.call("DEL", poolPrefix .. "/" .. jobID) -end -return limit -` -) - -var ( - respawnDeadletterSHA string - deleteDeadletterSHA string -) - -// Because the DeadLetter is not like Timer which is a singleton, -// DeadLetters are transient objects like Queue. So we have to preload -// the lua scripts separately. -func PreloadDeadLetterLuaScript(redis *RedisInstance) error { - sha, err := redis.Conn.ScriptLoad(dummyCtx, luaRespawnDeadletterScript).Result() - if err != nil { - return fmt.Errorf("failed to preload lua script: %s", err) - } - respawnDeadletterSHA = sha - - sha, err = redis.Conn.ScriptLoad(dummyCtx, luaDeleteDeadletterScript).Result() - if err != nil { - return fmt.Errorf("failed to preload luascript: %s", err) - } - deleteDeadletterSHA = sha - return nil -} - -// DeadLetter is where dead job will be buried, the job can be respawned into ready queue -type DeadLetter struct { - redis *RedisInstance - namespace string - queue string -} - -// NewDeadLetter return an instance of DeadLetter storage -func NewDeadLetter(namespace, queue string, redis *RedisInstance) (*DeadLetter, error) { - dl := &DeadLetter{ - redis: redis, - namespace: namespace, - queue: queue, - } - if respawnDeadletterSHA == "" || deleteDeadletterSHA == "" { - return nil, errors.New("dead letter's lua script is not preloaded") - } - return dl, nil -} - -func (dl *DeadLetter) Name() string { - return join(DeadLetterPrefix, dl.namespace, dl.queue) -} - -// Add a job to dead letter. NOTE the data format is the same -// as the ready queue (lua struct `HHc0`), by doing this we could directly pop -// the dead job back to the ready queue. -// -// NOTE: this method is not called any where except in tests, but this logic is -// implement in the timer's pump script. please refer to that. 
-func (dl *DeadLetter) Add(jobID string) error { - val := structPack(1, jobID) - if err := dl.redis.Conn.Persist(dummyCtx, PoolJobKey2(dl.namespace, dl.queue, jobID)).Err(); err != nil { - return err - } - return dl.redis.Conn.LPush(dummyCtx, dl.Name(), val).Err() -} - -func (dl *DeadLetter) Peek() (size int64, jobID string, err error) { - val, err := dl.redis.Conn.LIndex(dummyCtx, dl.Name(), -1).Result() - switch err { - case nil: - // continue - case go_redis.Nil: - return 0, "", engine.ErrNotFound - default: - return 0, "", err - } - tries, jobID, err := structUnpack(val) - if err != nil || tries != 1 { - return 0, "", fmt.Errorf("failed to unpack data: %s", err) - } - size, err = dl.redis.Conn.LLen(dummyCtx, dl.Name()).Result() - if err != nil { - return 0, "", err - } - return size, jobID, nil -} - -func (dl *DeadLetter) Delete(limit int64) (count int64, err error) { - if limit > 1 { - poolPrefix := PoolJobKeyPrefix(dl.namespace, dl.queue) - var batchSize int64 = 100 - if batchSize > limit { - batchSize = limit - } - for { - val, err := dl.redis.Conn.EvalSha(dummyCtx, deleteDeadletterSHA, []string{dl.Name(), poolPrefix}, batchSize).Result() - if err != nil { - if isLuaScriptGone(err) { - if err := PreloadDeadLetterLuaScript(dl.redis); err != nil { - logger.WithField("err", err).Error("Failed to load deadletter lua script") - } - continue - } - return count, err - } - n, _ := val.(int64) - count += n - if n < batchSize { // Dead letter is empty - break - } - if count >= limit { - break - } - if limit-count < batchSize { - batchSize = limit - count // This is the last batch, we should't respawn jobs exceeding the limit. - } - } - return count, nil - } else if limit == 1 { - data, err := dl.redis.Conn.RPop(dummyCtx, dl.Name()).Result() - if err != nil { - if err == go_redis.Nil { - return 0, nil - } - return 0, err - } - _, jobID, err := structUnpack(data) - if err != nil { - return 1, err - } - err = dl.redis.Conn.Del(dummyCtx, PoolJobKey2(dl.namespace, dl.queue, jobID)).Err() - if err != nil { - return 1, fmt.Errorf("failed to delete job data: %s", err) - } - return 1, nil - } else { - return 0, nil - } -} - -func (dl *DeadLetter) Respawn(limit, ttlSecond int64) (count int64, err error) { - defer func() { - if err != nil && count != 0 { - metrics.deadletterRespawnJobs.WithLabelValues(dl.redis.Name).Add(float64(count)) - } - }() - queueName := (&QueueName{ - Namespace: dl.namespace, - Queue: dl.queue, - }).String() - poolPrefix := PoolJobKeyPrefix(dl.namespace, dl.queue) - if limit > 1 { - var batchSize = BatchSize - if batchSize > limit { - batchSize = limit - } - for { - val, err := dl.redis.Conn.EvalSha(dummyCtx, respawnDeadletterSHA, []string{dl.Name(), queueName, poolPrefix}, batchSize, ttlSecond).Result() // Respawn `batchSize` jobs at a time - if err != nil { - if isLuaScriptGone(err) { - if err := PreloadDeadLetterLuaScript(dl.redis); err != nil { - logger.WithField("err", err).Error("Failed to load deadletter lua script") - } - continue - } - return 0, err - } - n, _ := val.(int64) - count += n - if n < batchSize { // Dead letter is empty - break - } - if count >= limit { - break - } - if limit-count < batchSize { - batchSize = limit - count // This is the last batch, we should't respawn jobs exceeding the limit. 
- } - } - return count, nil - } else if limit == 1 { - data, err := dl.redis.Conn.RPopLPush(dummyCtx, dl.Name(), queueName).Result() - if err != nil { - if err == go_redis.Nil { - return 0, nil - } - return 0, err - } - _, jobID, err := structUnpack(data) - if err != nil { - return 1, err - } - if ttlSecond > 0 { - err = dl.redis.Conn.Expire(dummyCtx, PoolJobKey2(dl.namespace, dl.queue, jobID), time.Duration(ttlSecond)*time.Second).Err() - } - if err != nil { - return 1, fmt.Errorf("failed to set TTL on respawned job[%s]: %s", jobID, err) - } - return 1, nil - } else { - return 0, nil - } -} - -func (dl *DeadLetter) Size() (size int64, err error) { - return dl.redis.Conn.LLen(dummyCtx, dl.Name()).Result() -} diff --git a/engine/redis_v2/deadletter_test.go b/engine/redis_v2/deadletter_test.go deleted file mode 100644 index 28de68d..0000000 --- a/engine/redis_v2/deadletter_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package redis_v2 - -import ( - "fmt" - "testing" - "time" - - "github.com/bitleak/lmstfy/engine" -) - -func TestDeadLetter_Add(t *testing.T) { - dl, _ := NewDeadLetter("ns-dead", "q0", R) - if err := dl.Add("x"); err != nil { - - } -} - -func TestDeadLetter_Peek(t *testing.T) { - dl, _ := NewDeadLetter("ns-dead", "q1", R) - dl.Add("x") - dl.Add("y") - dl.Add("z") - - size, jobID, err := dl.Peek() - if err != nil { - t.Fatalf("Failed to peek deadletter: %s", err) - } - if size != 3 || jobID != "x" { - t.Fatal("Mismatched job") - } -} - -func TestDeadLetter_Delete(t *testing.T) { - dl, _ := NewDeadLetter("ns-dead", "q2", R) - dl.Add("x") - dl.Add("y") - dl.Add("z") - - count, err := dl.Delete(2) - if err != nil || count != 2 { - t.Fatalf("Failed to delete two jobs from deadletter") - } - size, jobID, _ := dl.Peek() - if size != 1 || jobID != "z" { - t.Fatal("Expected two jobs in deadletter") - } - - count, err = dl.Delete(1) - if err != nil || count != 1 { - t.Fatalf("Failed to delete job from deadletter") - } - size, jobID, _ = dl.Peek() - if size != 0 { - t.Fatal("Expected no job in deadletter") - } -} - -func TestDeadLetter_Respawn(t *testing.T) { - p := NewPool(R) - job1 := engine.NewJob("ns-dead", "q3", []byte("1"), 60, 0, 1, "") - job2 := engine.NewJob("ns-dead", "q3", []byte("2"), 60, 0, 1, "") - job3 := engine.NewJob("ns-dead", "q3", []byte("3"), 60, 0, 1, "") - p.Add(job1) - p.Add(job2) - p.Add(job3) - dl, _ := NewDeadLetter("ns-dead", "q3", R) - dl.Add(job1.ID()) - dl.Add(job2.ID()) - dl.Add(job3.ID()) - - // Ensure TTL is removed when put into deadletter - job1Key := PoolJobKey(job1) - job1TTL := R.Conn.TTL(dummyCtx, job1Key).Val() - if job1TTL.Seconds() > 0 { - t.Fatalf("Respawned job's TTL should be removed") - } - - timer, err := NewTimer("ns-dead", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer timer.Shutdown() - q := NewQueue("ns-dead", "q3", R, timer) - - count, err := dl.Respawn(2, 10) - if err != nil || count != 2 { - t.Fatalf("Failed to respawn two jobs: %s", err) - } - jobID, _, err := q.Poll(1, 1) - if err != nil || jobID != job1.ID() { - t.Fatal("Expected to poll the first job respawned from deadletter") - } - // Ensure TTL is set - job1Key = PoolJobKey(job1) - job1TTL = R.Conn.TTL(dummyCtx, job1Key).Val() - if 10-job1TTL.Seconds() > 2 { // 2 seconds passed? no way. 
- t.Fatal("Deadletter job's TTL is not correct") - } - q.Poll(1, 1) // rm job2 - - count, err = dl.Respawn(1, 10) - if err != nil || count != 1 { - t.Fatalf("Failed to respawn one jobs: %s", err) - } - jobID, _, err = q.Poll(1, 1) - if err != nil || jobID != job3.ID() { - t.Fatal("Expected to poll the second job respawned from deadletter") - } - - // Ensure TTL is set - job2Key := PoolJobKey(job2) - job2TTL := R.Conn.TTL(dummyCtx, job2Key).Val() - if 10-job2TTL.Seconds() > 2 { - t.Fatal("Deadletter job's TTL is not correct") - } -} - -func TestDeadLetter_Size(t *testing.T) { - p := NewPool(R) - dl, _ := NewDeadLetter("ns-dead", "q3", R) - cnt := 3 - for i := 0; i < cnt; i++ { - job := engine.NewJob("ns-dead", "q3", []byte("1"), 60, 0, 1, "") - p.Add(job) - dl.Add(job.ID()) - } - size, _ := dl.Size() - if size != int64(cnt) { - t.Fatalf("Expected the deadletter queue size is: %d, but got %d\n", cnt, size) - } - dl.Delete(3) - size, _ = dl.Size() - if size != 0 { - t.Fatalf("Expected the deadletter queue size is: %d, but got %d\n", 0, size) - } -} diff --git a/engine/redis_v2/engine.go b/engine/redis_v2/engine.go deleted file mode 100644 index e71f157..0000000 --- a/engine/redis_v2/engine.go +++ /dev/null @@ -1,320 +0,0 @@ -package redis_v2 - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "time" - - go_redis "github.com/go-redis/redis/v8" - "google.golang.org/protobuf/proto" - - "github.com/bitleak/lmstfy/config" - "github.com/bitleak/lmstfy/engine" - "github.com/bitleak/lmstfy/engine/model" - "github.com/bitleak/lmstfy/storage" - "github.com/bitleak/lmstfy/uuid" -) - -type RedisInstance struct { - Name string - Conn *go_redis.Client -} - -// Engine that connects all the dots including: -// - store jobs to timer set or ready queue -// - deliver jobs to clients -// - manage dead letters -type Engine struct { - cfg *config.RedisConf - redis *RedisInstance - pool *Pool - timer *Timer - meta *MetaManager - monitor *SizeMonitor -} - -func NewEngine(redisName string, cfg *config.RedisConf, conn *go_redis.Client) (engine.Engine, error) { - redis := &RedisInstance{ - Name: redisName, - Conn: conn, - } - if err := PreloadDeadLetterLuaScript(redis); err != nil { - return nil, err - } - if err := PreloadQueueLuaScript(redis); err != nil { - return nil, err - } - go RedisInstanceMonitor(redis) - meta := NewMetaManager(redis) - timer, err := NewTimer("timer_set_v2", redis, time.Second, 600*time.Second) - if err != nil { - return nil, err - } - metadata, err := meta.Dump() - if err != nil { - return nil, err - } - monitor := NewSizeMonitor(redis, timer, metadata) - go monitor.Loop() - return &Engine{ - cfg: cfg, - redis: redis, - pool: NewPool(redis), - timer: timer, - meta: meta, - monitor: monitor, - }, nil -} - -func (e *Engine) Publish(job engine.Job) (jobID string, err error) { - defer func() { - if err == nil { - metrics.publishJobs.WithLabelValues(e.redis.Name).Inc() - metrics.publishQueueJobs.WithLabelValues(e.redis.Name, job.Namespace(), job.Queue()).Inc() - } - }() - return e.publishJob(job) -} - -func (e *Engine) sink2SecondStorage(ctx context.Context, job engine.Job) error { - return storage.Get().AddJob(ctx, e.redis.Name, job) -} - -// BatchConsume consume some jobs of a queue -func (e *Engine) BatchConsume(namespace string, queues []string, count, ttrSecond, timeoutSecond uint32) (jobs []engine.Job, err error) { - jobs = make([]engine.Job, 0) - // timeout is 0 to fast check whether there is any job in the ready queue, - // if any, we wouldn't be blocked until the new job 
was published. - for i := uint32(0); i < count; i++ { - job, err := e.Consume(namespace, queues, ttrSecond, 0) - if err != nil { - return jobs, err - } - if job == nil { - break - } - jobs = append(jobs, job) - } - // If there is no job and consumed in block mode, wait for a single job and return - if timeoutSecond > 0 && len(jobs) == 0 { - job, err := e.Consume(namespace, queues, ttrSecond, timeoutSecond) - if err != nil { - return jobs, err - } - if job != nil { - jobs = append(jobs, job) - } - return jobs, nil - } - return jobs, nil -} - -// Consume multiple queues under the same namespace. the queue order implies priority: -// the first queue in the list is of the highest priority when that queue has job ready to -// be consumed. if none of the queues has any job, then consume wait for any queue that -// has job first. -func (e *Engine) Consume(namespace string, queues []string, ttrSecond, timeoutSecond uint32) (job engine.Job, err error) { - return e.consumeMulti(namespace, queues, ttrSecond, timeoutSecond) -} - -func (e *Engine) consumeMulti(namespace string, queues []string, ttrSecond, timeoutSecond uint32) (job engine.Job, err error) { - defer func() { - if job != nil { - metrics.consumeMultiJobs.WithLabelValues(e.redis.Name).Inc() - metrics.consumeQueueJobs.WithLabelValues(e.redis.Name, namespace, job.Queue()).Inc() - } - }() - queueNames := make([]QueueName, len(queues)) - for i, q := range queues { - queueNames[i].Namespace = namespace - queueNames[i].Queue = q - } - for { - startTime := time.Now().Unix() - queueName, jobID, tries, err := PollQueues(e.redis, e.timer, queueNames, timeoutSecond, ttrSecond) - if err != nil { - return nil, fmt.Errorf("queue: %s", err) - } - if jobID == "" { - return nil, nil - } - endTime := time.Now().Unix() - body, ttl, err := e.pool.Get(namespace, queueName.Queue, jobID) - switch err { - case nil: - // no-op - case engine.ErrNotFound: - timeoutSecond = timeoutSecond - uint32(endTime-startTime) - if timeoutSecond > 0 { - // This can happen if the job's delay time is larger than job's ttl, - // so when the timer fires the job ID, the actual job data is long gone. - // When so, we should use what's left in the timeoutSecond to keep on polling. - // - // Other scene is: A consumer DELETE the job _after_ TTR, and B consumer is - // polling on the queue, and get notified to retry the job, but only to find that - // job was deleted by A. 
- continue - } else { - return nil, nil - } - default: - return nil, fmt.Errorf("pool: %s", err) - } - res := &model.JobData{} - if err = proto.Unmarshal(body, res); err != nil { - return nil, err - } - job = engine.NewJobWithID(namespace, queueName.Queue, res.GetData(), ttl, tries, jobID, res.GetAttributes()) - metrics.jobElapsedMS.WithLabelValues(e.redis.Name, namespace, queueName.Queue).Observe(float64(job.ElapsedMS())) - return job, nil - } -} - -func (e *Engine) Delete(namespace, queue, jobID string) error { - _ = e.timer.removeFromBackup(namespace, queue, jobID) - err := e.pool.Delete(namespace, queue, jobID) - if err == nil { - elapsedMS, _ := uuid.ElapsedMilliSecondFromUniqueID(jobID) - metrics.jobAckElapsedMS.WithLabelValues(e.redis.Name, namespace, queue).Observe(float64(elapsedMS)) - } - return err -} - -func (e *Engine) Peek(namespace, queue, optionalJobID string) (job engine.Job, err error) { - jobID := optionalJobID - var tries uint16 - if optionalJobID == "" { - q := NewQueue(namespace, queue, e.redis, e.timer) - jobID, tries, err = q.Peek() - switch err { - case nil: - // continue - case engine.ErrNotFound: - return nil, engine.ErrEmptyQueue - default: - return nil, fmt.Errorf("failed to peek queue: %s", err) - } - } - body, ttl, err := e.pool.Get(namespace, queue, jobID) - // Tricky: we shouldn't return the not found error when the job was not found, - // since the job may be expired(TTL was reached) and it would confuse the user, so - // we return the nil job instead of the not found error here. But if the `optionalJobID` - // was assigned we should return the not fond error. - if optionalJobID == "" && err == engine.ErrNotFound { - // return jobID with nil body if the job is expired - return engine.NewJobWithID(namespace, queue, nil, 0, 0, jobID, nil), nil - } - - // look up job data in storage - if err == engine.ErrNotFound && e.cfg.EnableSecondaryStorage { - res, err := storage.Get().GetJobByID(context.TODO(), optionalJobID) - if err != nil { - return nil, err - } - if len(res) == 0 { - return nil, engine.ErrNotFound - } - body = res[0].Body() - } - data := &model.JobData{} - if err = proto.Unmarshal(body, data); err != nil { - return nil, err - } - return engine.NewJobWithID(namespace, queue, data.GetData(), ttl, tries, jobID, data.GetAttributes()), err -} - -func (e *Engine) Size(namespace, queue string) (size int64, err error) { - q := NewQueue(namespace, queue, e.redis, e.timer) - return q.Size() -} - -func (e *Engine) Destroy(namespace, queue string) (count int64, err error) { - e.meta.Remove(namespace, queue) - e.monitor.Remove(namespace, queue) - q := NewQueue(namespace, queue, e.redis, e.timer) - return q.Destroy() -} - -func (e *Engine) PeekDeadLetter(namespace, queue string) (size int64, jobID string, err error) { - dl, err := NewDeadLetter(namespace, queue, e.redis) - if err != nil { - return 0, "", err - } - return dl.Peek() -} - -func (e *Engine) DeleteDeadLetter(namespace, queue string, limit int64) (count int64, err error) { - dl, err := NewDeadLetter(namespace, queue, e.redis) - if err != nil { - return 0, err - } - return dl.Delete(limit) -} - -func (e *Engine) RespawnDeadLetter(namespace, queue string, limit, ttlSecond int64) (count int64, err error) { - dl, err := NewDeadLetter(namespace, queue, e.redis) - if err != nil { - return 0, err - } - return dl.Respawn(limit, ttlSecond) -} - -// SizeOfDeadLetter return the queue size of dead letter -func (e *Engine) SizeOfDeadLetter(namespace, queue string) (size int64, err error) { - dl, err := 
NewDeadLetter(namespace, queue, e.redis) - if err != nil { - return 0, err - } - return dl.Size() -} - -func (e *Engine) Shutdown() { - e.timer.Shutdown() -} - -func (e *Engine) DumpInfo(out io.Writer) error { - metadata, err := e.meta.Dump() - if err != nil { - return err - } - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - return enc.Encode(metadata) -} - -func (e *Engine) publishJob(job engine.Job) (jobID string, err error) { - e.meta.RecordIfNotExist(job.Namespace(), job.Queue()) - e.monitor.MonitorIfNotExist(job.Namespace(), job.Queue()) - if job.Tries() == 0 { - return job.ID(), errors.New("invalid job: tries cannot be zero") - } - delaySecond := job.Delay() - if e.cfg.EnableSecondaryStorage && - storage.Get() != nil && - delaySecond > uint32(e.cfg.SecondaryStorageThresholdSeconds) { - if err := e.sink2SecondStorage(context.TODO(), job); err == nil { - return job.ID(), nil - } - } - err = e.pool.Add(job) - if err != nil { - return job.ID(), fmt.Errorf("pool: %w", err) - } - - if delaySecond == 0 { - q := NewQueue(job.Namespace(), job.Queue(), e.redis, e.timer) - err = q.Push(job) - if err != nil { - err = fmt.Errorf("queue: %s", err) - } - return job.ID(), err - } - err = e.timer.Add(job.Namespace(), job.Queue(), job.ID(), delaySecond, job.Tries()) - if err != nil { - err = fmt.Errorf("timer: %s", err) - } - return job.ID(), err -} diff --git a/engine/redis_v2/engine_test.go b/engine/redis_v2/engine_test.go deleted file mode 100644 index 746d1f3..0000000 --- a/engine/redis_v2/engine_test.go +++ /dev/null @@ -1,537 +0,0 @@ -package redis_v2 - -import ( - "bytes" - "fmt" - "testing" - - "github.com/bitleak/lmstfy/config" - "github.com/bitleak/lmstfy/engine" - "github.com/bitleak/lmstfy/storage" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - redisMaxMemory = "10000000" - dummyPoolConf = &config.RedisConf{} - attributes = map[string]string{"flag": "1", "label": "abc"} -) - -func TestEngine_Publish(t *testing.T) { - e, err := NewEngine(R.Name, dummyPoolConf, R.Conn) - if err != nil { - panic(fmt.Sprintf("Setup engine error: %s", err)) - } - defer e.Shutdown() - body := []byte("hello msg 1") - j := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q1", - ID: "", - Body: body, - TTL: 10, - Delay: 2, - Tries: 1, - Attributes: nil, - }) - jobID, err := e.Publish(j) - t.Log(jobID) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } - - // Publish no-delay job - j = engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q1", - ID: "", - Body: body, - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - jobID, err = e.Publish(j) - t.Log(jobID) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } - - // Publish no-delay job with attributes - j = engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q1", - ID: "", - Body: body, - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: attributes, - }) - jobID, err = e.Publish(j) - t.Log(jobID) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } -} - -func TestEngine_Publish_SecondaryStorage(t *testing.T) { - manager, err := storage.NewManger(testConfig.Config) - require.Nil(t, err) - - e, err := NewEngine(R.Name, &config.RedisConf{ - EnableSecondaryStorage: true, - SecondaryStorageThresholdSeconds: 0, - }, R.Conn) - if err != nil { - panic(fmt.Sprintf("Setup engine error: %s", err)) - } - defer e.Shutdown() - - err = R.Conn.ConfigSet(dummyCtx, "maxmemory", redisMaxMemory).Err() - 
assert.Nil(t, err) - - defer manager.Shutdown() - manager.AddPool(R.Name, e, 0) - // Publish long-delay job - body := []byte("hello msg long delay job") - j := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "qs", - ID: "", - Body: body, - TTL: 120, - Delay: 1, - Tries: 1, - Attributes: attributes, - }) - jobID, err := e.Publish(j) - assert.Nil(t, err) - - job, err := e.Consume("ns-engine", []string{"qs"}, 3, 10) - assert.Nil(t, err) - assert.EqualValues(t, jobID, job.ID()) - assert.EqualValues(t, body, job.Body()) - assert.EqualValues(t, body, job.Body()) - assert.NotNil(t, job.Attributes()) - assert.EqualValues(t, "1", job.Attributes()["flag"]) - assert.EqualValues(t, "abc", job.Attributes()["label"]) -} - -func TestEngine_Consume(t *testing.T) { - e, err := NewEngine(R.Name, dummyPoolConf, R.Conn) - if err != nil { - panic(fmt.Sprintf("Setup engine error: %s", err)) - } - defer e.Shutdown() - body := []byte("hello msg 2") - //j := engine.NewJob("ns-engine", "q2", body, 10, 2, 1, "", "") - j := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q2", - ID: "", - Body: body, - TTL: 10, - Delay: 2, - Tries: 1, - Attributes: nil, - }) - jobID, err := e.Publish(j) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } - job, err := e.Consume("ns-engine", []string{"q2"}, 3, 3) - if err != nil { - t.Fatalf("Failed to consume: %s", err) - } - if job.Tries() != 0 { - t.Fatalf("job tries = 0 was expected, but got %d", job.Tries()) - } - if !bytes.Equal(body, job.Body()) || jobID != job.ID() { - t.Fatalf("Mistmatched job data") - } - - // Consume job that's published in no-delay way - j = engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q2", - ID: "", - Body: body, - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - jobID, err = e.Publish(j) - t.Log(jobID) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } - job, err = e.Consume("ns-engine", []string{"q2"}, 3, 0) - if err != nil { - t.Fatalf("Failed to consume: %s", err) - } - if !bytes.Equal(body, job.Body()) || jobID != job.ID() { - t.Fatalf("Mistmatched job data") - } - - // Consume job with attributes - j = engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q2", - ID: "", - Body: body, - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: attributes, - }) - jobID, err = e.Publish(j) - t.Log(jobID) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } - job, err = e.Consume("ns-engine", []string{"q2"}, 3, 0) - if err != nil { - t.Fatalf("Failed to consume: %s", err) - } - if !bytes.Equal(body, job.Body()) || jobID != job.ID() { - t.Fatalf("Mistmatched job data") - } - assert.NotNil(t, job.Attributes()) - assert.EqualValues(t, "1", job.Attributes()["flag"]) - assert.EqualValues(t, "abc", job.Attributes()["label"]) -} - -// Consume the first one from multi publish -func TestEngine_Consume2(t *testing.T) { - e, err := NewEngine(R.Name, dummyPoolConf, R.Conn) - if err != nil { - panic(fmt.Sprintf("Setup engine error: %s", err)) - } - defer e.Shutdown() - body := []byte("hello msg 3") - j := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q3", - ID: "", - Body: []byte("delay msg"), - TTL: 10, - Delay: 5, - Tries: 1, - Attributes: nil, - }) - - _, err = e.Publish(j) - j = engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q3", - ID: "", - Body: body, - TTL: 10, - Delay: 2, - Tries: 1, - Attributes: nil, - }) - jobID, err := e.Publish(j) - if err 
!= nil { - t.Fatalf("Failed to publish: %s", err) - } - job, err := e.Consume("ns-engine", []string{"q3"}, 3, 3) - if err != nil { - t.Fatalf("Failed to consume: %s", err) - } - if job.Tries() != 0 { - t.Fatalf("job tries = 0 was expected, but got %d", job.Tries()) - } - if !bytes.Equal(body, job.Body()) || jobID != job.ID() { - t.Fatalf("Mistmatched job data") - } -} - -func TestEngine_ConsumeMulti(t *testing.T) { - e, err := NewEngine(R.Name, dummyPoolConf, R.Conn) - if err != nil { - panic(fmt.Sprintf("Setup engine error: %s", err)) - } - defer e.Shutdown() - body := []byte("hello msg 4") - j := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q4", - ID: "", - Body: body, - TTL: 10, - Delay: 3, - Tries: 1, - Attributes: nil, - }) - jobID, err := e.Publish(j) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } - j = engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q5", - ID: "", - Body: body, - TTL: 10, - Delay: 1, - Tries: 1, - Attributes: nil, - }) - jobID2, err := e.Publish(j) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } - - job2, err := e.Consume("ns-engine", []string{"q4", "q5"}, 5, 5) - if err != nil { - t.Fatalf("Failed to consume from multiple queues: %s", err) - } - if job2.Tries() != 0 { - t.Fatalf("job tries = 0 was expected, but got %d", job2.Tries()) - } - if job2.Queue() != "q5" || job2.ID() != jobID2 { // q5's job should be fired first - t.Error("Mismatched job data") - } - - job1, err := e.Consume("ns-engine", []string{"q4", "q5"}, 5, 5) - if err != nil { - t.Fatalf("Failed to consume from multiple queues: %s", err) - } - if job1.Tries() != 0 { - t.Fatalf("job tries = 0 was expected, but got %d", job1.Tries()) - } - if job1.Queue() != "q4" || job1.ID() != jobID { // q4's job should be fired next - t.Fatalf("Failed to consume from multiple queues: %s", err) - } -} - -func TestEngine_Peek(t *testing.T) { - e, err := NewEngine(R.Name, dummyPoolConf, R.Conn) - if err != nil { - panic(fmt.Sprintf("Setup engine error: %s", err)) - } - defer e.Shutdown() - body := []byte("hello msg 6") - j := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q6", - ID: "", - Body: body, - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - jobID, err := e.Publish(j) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } - job, err := e.Peek("ns-engine", "q6", "") - if err != nil { - t.Fatalf("Failed to peek: %s", err) - } - if job.ID() != jobID || !bytes.Equal(job.Body(), body) { - t.Fatal("Mismatched job") - } - - _, err = e.Consume("ns-engine", []string{"q6"}, 5, 0) - if err != nil { - t.Fatalf("Failed to consume previous queue job: %s", err) - } - - // test peek job with attributes - j = engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q6", - ID: "", - Body: body, - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: attributes, - }) - jobID, err = e.Publish(j) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } - job, err = e.Peek("ns-engine", "q6", "") - if err != nil { - t.Fatalf("Failed to peek: %s", err) - } - if job.ID() != jobID || !bytes.Equal(job.Body(), body) { - t.Fatal("Mismatched job") - } - assert.NotNil(t, job.Attributes()) - assert.EqualValues(t, "1", job.Attributes()["flag"]) - assert.EqualValues(t, "abc", job.Attributes()["label"]) -} - -func TestEngine_Peek_SecondaryStorage(t *testing.T) { - e, err := NewEngine(R.Name, &config.RedisConf{ - EnableSecondaryStorage: true, - SecondaryStorageThresholdSeconds: 10, 
- }, R.Conn) - if err != nil { - panic(fmt.Sprintf("Setup engine error: %s", err)) - } - defer e.Shutdown() - - manager, err := storage.NewManger(testConfig.Config) - defer manager.Shutdown() - require.Nil(t, err) - manager.AddPool(R.Name, e, 30) - - // Publish long-delay job - body := []byte("engine peek long delay job") - j := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "qst", - ID: "", - Body: body, - TTL: 120, - Delay: 45, - Tries: 1, - Attributes: attributes, - }) - - jobID, err := e.Publish(j) - t.Log(jobID) - assert.Nil(t, err) - job, err := e.Peek("ns-engine", "qst", "") - assert.Nil(t, job) - assert.EqualValues(t, engine.ErrEmptyQueue, err) - job, err = e.Peek("ns-engine", "qst", jobID) - assert.Nil(t, err) - assert.EqualValues(t, jobID, job.ID()) - assert.EqualValues(t, body, job.Body()) - assert.NotNil(t, job.Attributes()) - assert.EqualValues(t, "1", job.Attributes()["flag"]) - assert.EqualValues(t, "abc", job.Attributes()["label"]) -} - -func TestEngine_BatchConsume(t *testing.T) { - e, err := NewEngine(R.Name, dummyPoolConf, R.Conn) - if err != nil { - panic(fmt.Sprintf("Setup engine error: %s", err)) - } - defer e.Shutdown() - body := []byte("hello msg 7") - j := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q7", - ID: "", - Body: body, - TTL: 10, - Delay: 3, - Tries: 1, - Attributes: nil, - }) - jobID, err := e.Publish(j) - t.Log(jobID) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } - queues := []string{"q7"} - jobs, err := e.BatchConsume("ns-engine", queues, 2, 5, 2) - if err != nil { - t.Fatalf("Failed to Batch consume: %s", err) - } - if len(jobs) != 0 { - t.Fatalf("Wrong job consumed") - } - - jobs, err = e.BatchConsume("ns-engine", queues, 2, 3, 2) - if err != nil { - t.Fatalf("Failed to Batch consume: %s", err) - } - if len(jobs) != 1 || !bytes.Equal(body, jobs[0].Body()) || jobID != jobs[0].ID() { - t.Fatalf("Mistmatched job data") - } - - // Consume some jobs - jobIDMap := map[string]bool{} - for i := 0; i < 4; i++ { - j := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q7", - ID: "", - Body: body, - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - jobID, err := e.Publish(j) - t.Log(jobID) - if err != nil { - t.Fatalf("Failed to publish: %s", err) - } - jobIDMap[jobID] = true - } - - // First time batch consume three jobs - jobs, err = e.BatchConsume("ns-engine", queues, 3, 3, 3) - if err != nil { - t.Fatalf("Failed to consume: %s", err) - } - if len(jobs) != 3 { - t.Fatalf("Mistmatched jobs count") - } - for _, job := range jobs { - if !bytes.Equal(body, job.Body()) || !jobIDMap[job.ID()] { - t.Fatalf("Mistmatched job data") - } - } - - // Second time batch consume can only get a single job - jobs, err = e.BatchConsume("ns-engine", queues, 3, 3, 3) - if err != nil { - t.Fatalf("Failed to consume: %s", err) - } - if len(jobs) != 1 { - t.Fatalf("Mistmatched jobs count") - } - if !bytes.Equal(body, jobs[0].Body()) || !jobIDMap[jobs[0].ID()] { - t.Fatalf("Mistmatched job data") - } - - // Third time batch consume will be blocked by 3s - jobs, err = e.BatchConsume("ns-engine", queues, 3, 3, 3) - if err != nil { - t.Fatalf("Failed to consume: %s", err) - } - if len(jobs) != 0 { - t.Fatalf("Mistmatched jobs count") - } -} - -func TestEngine_PublishWithJobID(t *testing.T) { - e, err := NewEngine(R.Name, dummyPoolConf, R.Conn) - if err != nil { - panic(fmt.Sprintf("Setup engine error: %s", err)) - } - defer e.Shutdown() - body := []byte("hello msg 1") 
- j := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-engine", - Queue: "q8", - ID: "jobID1", - Body: body, - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - jobID, err := e.Publish(j) - t.Log(jobID) - assert.Nil(t, err) - - // Make sure the engine received the job - job, err := e.Consume("ns-engine", []string{"q8"}, 3, 0) - assert.EqualValues(t, jobID, job.ID()) -} diff --git a/engine/redis_v2/hooks/init.go b/engine/redis_v2/hooks/init.go deleted file mode 100644 index c352646..0000000 --- a/engine/redis_v2/hooks/init.go +++ /dev/null @@ -1,55 +0,0 @@ -package hooks - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -type performanceMetrics struct { - Latencies *prometheus.HistogramVec - QPS *prometheus.CounterVec -} - -var _metrics *performanceMetrics - -const ( - _namespace = "infra" - _subsystem = "lmstfy_redis_v2" -) - -func setupMetrics() { - labels := []string{"node", "command", "status"} - buckets := prometheus.ExponentialBuckets(1, 2, 16) - newHistogram := func(name string, labels ...string) *prometheus.HistogramVec { - histogram := prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: _namespace, - Subsystem: _subsystem, - Name: name, - Buckets: buckets, - }, - labels, - ) - prometheus.MustRegister(histogram) - return histogram - } - newCounter := func(name string, labels ...string) *prometheus.CounterVec { - counters := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: _namespace, - Subsystem: _subsystem, - Name: name, - }, - labels, - ) - prometheus.MustRegister(counters) - return counters - } - _metrics = &performanceMetrics{ - Latencies: newHistogram("latency", labels...), - QPS: newCounter("qps", labels...), - } -} - -func init() { - setupMetrics() -} diff --git a/engine/redis_v2/hooks/metrics.go b/engine/redis_v2/hooks/metrics.go deleted file mode 100644 index 5203e2e..0000000 --- a/engine/redis_v2/hooks/metrics.go +++ /dev/null @@ -1,62 +0,0 @@ -package hooks - -import ( - "context" - "time" - - "github.com/go-redis/redis/v8" - "github.com/prometheus/client_golang/prometheus" -) - -const ( - _contextStartTimeKey = iota + 1 - _contextSegmentKey -) - -type MetricsHook struct { - client *redis.Client -} - -func NewMetricsHook(client *redis.Client) *MetricsHook { - return &MetricsHook{client: client} -} - -func (hook MetricsHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) { - return context.WithValue(ctx, _contextStartTimeKey, time.Now()), nil -} - -func (hook MetricsHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error { - hook.record(ctx, cmd.Name(), cmd.Err()) - return nil -} - -func (hook MetricsHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) { - return context.WithValue(ctx, _contextStartTimeKey, time.Now()), nil -} - -func (hook MetricsHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error { - var firstErr error - for _, cmd := range cmds { - if cmd.Err() != nil { - firstErr = cmd.Err() - break - } - } - hook.record(ctx, "pipeline", firstErr) - return nil -} - -func (hook MetricsHook) record(ctx context.Context, cmd string, err error) { - startTime, ok := ctx.Value(_contextStartTimeKey).(time.Time) - if !ok { - return - } - durationMS := time.Since(startTime).Milliseconds() - status := "ok" - if err != nil && err != redis.Nil { - status = "error" - } - labels := prometheus.Labels{"node": hook.client.Options().Addr, "command": cmd, "status": status} - _metrics.QPS.With(labels).Inc() - 
_metrics.Latencies.With(labels).Observe(float64(durationMS)) -} diff --git a/engine/redis_v2/info.go b/engine/redis_v2/info.go deleted file mode 100644 index 90ae912..0000000 --- a/engine/redis_v2/info.go +++ /dev/null @@ -1,101 +0,0 @@ -package redis_v2 - -import ( - "strconv" - "strings" - "time" -) - -type RedisInfo struct { - MemUsed int64 // used_memory - MemMax int64 // maxmemory - NKeys int64 // total keys - NExpires int64 // keys with TTL - NClients int64 // connected_clients - NBlocking int64 // blocked_clients -} - -func GetRedisInfo(redis *RedisInstance) *RedisInfo { - info := &RedisInfo{} - - memoryInfo, err := redis.Conn.Info(dummyCtx, "memory").Result() - if err == nil { - lines := strings.Split(memoryInfo, "\r\n") - for _, l := range lines { - k, v, _ := parseColonSeparatedKV(l) - switch k { - case "used_memory": - info.MemUsed = v - case "maxmemory": - info.MemMax = v - } - } - } - keyInfo, err := redis.Conn.Info(dummyCtx, "keyspace").Result() - if err == nil { - lines := strings.Split(keyInfo, "\r\n") - for _, l := range lines { - splits := strings.SplitN(l, ":", 2) - if len(splits) != 2 || splits[0] != "db0" { - continue - } - splits2 := strings.SplitN(splits[1], ",", 3) - for _, s := range splits2 { - k, v, _ := parseEqualSeparatedKV(s) - switch k { - case "keys": - info.NKeys = v - case "expires": - info.NExpires = v - } - } - } - } - clientInfo, err := redis.Conn.Info(dummyCtx, "clients").Result() - if err == nil { - lines := strings.Split(clientInfo, "\r\n") - for _, l := range lines { - k, v, _ := parseColonSeparatedKV(l) - switch k { - case "connected_clients": - info.NClients = v - case "blocked_clients": - info.NBlocking = v - } - } - } - - return info -} - -func parseColonSeparatedKV(str string) (key string, value int64, err error) { - splits := strings.SplitN(str, ":", 2) - if len(splits) == 2 { - key = splits[0] - value, err = strconv.ParseInt(splits[1], 10, 64) - } - return -} - -func parseEqualSeparatedKV(str string) (key string, value int64, err error) { - splits := strings.SplitN(str, "=", 2) - if len(splits) == 2 { - key = splits[0] - value, err = strconv.ParseInt(splits[1], 10, 64) - } - return -} - -func RedisInstanceMonitor(redis *RedisInstance) { - for { - time.Sleep(5 * time.Second) - info := GetRedisInfo(redis) - - metrics.redisMaxMem.WithLabelValues(redis.Name).Set(float64(info.MemMax)) - metrics.redisMemUsed.WithLabelValues(redis.Name).Set(float64(info.MemUsed)) - metrics.redisConns.WithLabelValues(redis.Name).Set(float64(info.NClients)) - metrics.redisBlockings.WithLabelValues(redis.Name).Set(float64(info.NBlocking)) - metrics.redisKeys.WithLabelValues(redis.Name).Set(float64(info.NKeys)) - metrics.redisExpires.WithLabelValues(redis.Name).Set(float64(info.NExpires)) - } -} diff --git a/engine/redis_v2/info_test.go b/engine/redis_v2/info_test.go deleted file mode 100644 index 05b726f..0000000 --- a/engine/redis_v2/info_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package redis_v2 - -import "testing" - -func TestGetRedisInfo(t *testing.T) { - R.Conn.Set(dummyCtx, "info", 1, 0) - info := GetRedisInfo(R) - if info.NKeys < 1 { - t.Fatalf("Expected NKeys is at least 1") - } - if info.MemUsed <= 0 { - t.Fatalf("Expected MemUsed is non-zero") - } - if info.NClients < 1 { - t.Fatalf("Expected NClients is at least 1") - } -} diff --git a/engine/redis_v2/meta.go b/engine/redis_v2/meta.go deleted file mode 100644 index d43333e..0000000 --- a/engine/redis_v2/meta.go +++ /dev/null @@ -1,122 +0,0 @@ -package redis_v2 - -import ( - "sync" - - 
"github.com/sirupsen/logrus" -) - -/** -Record meta info passively. meta info includes: -- namespaces list -- queue list of namespace -*/ - -type MetaManager struct { - redis *RedisInstance - nsCache map[string]bool // namespace => bool - qCache map[string]bool // {namespace}+{queue} => bool - rwmu sync.RWMutex -} - -func NewMetaManager(redis *RedisInstance) *MetaManager { - m := &MetaManager{ - redis: redis, - nsCache: make(map[string]bool), - qCache: make(map[string]bool), - } - go m.initialize() - return m -} - -func (m *MetaManager) RecordIfNotExist(namespace, queue string) { - m.rwmu.RLock() - if m.nsCache[namespace] && m.qCache[join(namespace, queue)] { - m.rwmu.RUnlock() - return - } - m.rwmu.RUnlock() - - m.rwmu.Lock() - if m.nsCache[namespace] { - m.qCache[join(namespace, queue)] = true - m.rwmu.Unlock() - m.redis.Conn.HSet(dummyCtx, join(MetaPrefix, "ns", namespace), queue, 1) - } else { - m.nsCache[namespace] = true - m.qCache[join(namespace, queue)] = true - m.rwmu.Unlock() - m.redis.Conn.HSet(dummyCtx, join(MetaPrefix, "ns"), namespace, 1) - m.redis.Conn.HSet(dummyCtx, join(MetaPrefix, "ns", namespace), queue, 1) - } -} - -func (m *MetaManager) Remove(namespace, queue string) { - m.rwmu.Lock() - delete(m.nsCache, namespace) - delete(m.qCache, join(namespace, queue)) - m.rwmu.Unlock() - m.redis.Conn.HDel(dummyCtx, join(MetaPrefix, "ns", namespace), queue) -} - -func (m *MetaManager) ListNamespaces() (namespaces []string, err error) { - val, err := m.redis.Conn.HGetAll(dummyCtx, join(MetaPrefix, "ns")).Result() - if err != nil { - return nil, err - } - for k := range val { - namespaces = append(namespaces, k) - } - return namespaces, nil -} - -func (m *MetaManager) ListQueues(namespace string) (queues []string, err error) { - val, err := m.redis.Conn.HGetAll(dummyCtx, join(MetaPrefix, "ns", namespace)).Result() - if err != nil { - return nil, err - } - for k := range val { - queues = append(queues, k) - } - return queues, nil -} - -func (m *MetaManager) initialize() { - namespaces, err := m.ListNamespaces() - if err != nil { - logger.WithField("error", err).Error("initialize meta manager list namespaces error") - return - } - for _, n := range namespaces { - queues, err := m.ListQueues(n) - if err != nil { - logger.WithFields(logrus.Fields{ - "namespace": n, - "error": err, - }).Error("initialize meta manager list queues error") - return - } - for _, q := range queues { - m.rwmu.Lock() - m.nsCache[n] = true - m.qCache[join(n, q)] = true - m.rwmu.Unlock() - } - } -} - -func (m *MetaManager) Dump() (map[string][]string, error) { - data := make(map[string][]string) - namespaces, err := m.ListNamespaces() - if err != nil { - return nil, err - } - for _, n := range namespaces { - queues, err := m.ListQueues(n) - if err != nil { - return nil, err - } - data[n] = queues - } - return data, nil -} diff --git a/engine/redis_v2/metrics.go b/engine/redis_v2/metrics.go deleted file mode 100644 index 635587d..0000000 --- a/engine/redis_v2/metrics.go +++ /dev/null @@ -1,229 +0,0 @@ -package redis_v2 - -import ( - "fmt" - "strings" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -type Metrics struct { - // engine related metrics - publishJobs *prometheus.CounterVec - consumeJobs *prometheus.CounterVec - consumeMultiJobs *prometheus.CounterVec - poolAddJobs *prometheus.CounterVec - poolGetJobs *prometheus.CounterVec - poolDeleteJobs *prometheus.CounterVec - timerAddJobs *prometheus.CounterVec - timerRemoveBackupJobs *prometheus.CounterVec - timerDueJobs 
*prometheus.CounterVec - timerFullBatches *prometheus.CounterVec - queueDirectPushJobs *prometheus.CounterVec - queuePopJobs *prometheus.CounterVec - deadletterRespawnJobs *prometheus.CounterVec - publishQueueJobs *prometheus.CounterVec - consumeQueueJobs *prometheus.CounterVec - jobElapsedMS *prometheus.HistogramVec - jobAckElapsedMS *prometheus.HistogramVec - - timerSizes *prometheus.GaugeVec - timerBackupSizes *prometheus.GaugeVec - queueSizes *prometheus.GaugeVec - deadletterSizes *prometheus.GaugeVec - - // redis instance related metrics - redisMaxMem *prometheus.GaugeVec - redisMemUsed *prometheus.GaugeVec - redisConns *prometheus.GaugeVec - redisBlockings *prometheus.GaugeVec - redisKeys *prometheus.GaugeVec - redisExpires *prometheus.GaugeVec -} - -var ( - metrics *Metrics -) - -const ( - Namespace = "infra" - Subsystem = "lmstfy_redis_v2" -) - -func setupMetrics() { - cv := newCounterVecHelper - gv := newGaugeVecHelper - hv := newHistogramHelper - metrics = &Metrics{ - publishJobs: cv("publish_jobs"), - consumeJobs: cv("consume_jobs"), - consumeMultiJobs: cv("consume_multi_jobs"), - poolAddJobs: cv("pool_add_jobs"), - poolGetJobs: cv("pool_get_jobs"), - poolDeleteJobs: cv("pool_delete_jobs"), - timerAddJobs: cv("timer_add_jobs"), - timerRemoveBackupJobs: cv("timer_remove_backup_jobs"), - timerDueJobs: cv("timer_due_jobs"), - timerFullBatches: cv("timer_full_batches"), - queueDirectPushJobs: cv("queue_direct_push_jobs"), - queuePopJobs: cv("queue_pop_jobs"), - deadletterRespawnJobs: cv("deadletter_respawn_jobs"), - publishQueueJobs: cv("publish_queue_jobs", "namespace", "queue"), - consumeQueueJobs: cv("consume_queue_jobs", "namespace", "queue"), - jobElapsedMS: hv("job_elapsed_ms", "namespace", "queue"), - jobAckElapsedMS: hv("job_ack_elapsed_ms", "namespace", "queue"), - - timerSizes: gv("timer_sizes"), - timerBackupSizes: gv("timer_backup_sizes"), - queueSizes: gv("queue_sizes", "namespace", "queue"), - deadletterSizes: gv("deadletter_sizes", "namespace", "queue"), - - redisMaxMem: gv("max_mem_bytes"), - redisMemUsed: gv("used_mem_bytes"), - redisConns: gv("connections"), - redisBlockings: gv("blocking_connections"), - redisKeys: gv("total_keys"), - redisExpires: gv("total_ttl_keys"), - } -} - -func newCounterVecHelper(name string, labels ...string) *prometheus.CounterVec { - labels = append([]string{"pool"}, labels...) // all metrics has this common field `pool` - opts := prometheus.CounterOpts{} - opts.Namespace = Namespace - opts.Subsystem = Subsystem - opts.Name = name - opts.Help = name - counters := prometheus.NewCounterVec(opts, labels) - prometheus.MustRegister(counters) - return counters -} - -func newGaugeVecHelper(name string, labels ...string) *prometheus.GaugeVec { - labels = append([]string{"pool"}, labels...) - opts := prometheus.GaugeOpts{} - opts.Namespace = Namespace - opts.Subsystem = Subsystem - opts.Name = name - opts.Help = name - gauges := prometheus.NewGaugeVec(opts, labels) - prometheus.MustRegister(gauges) - return gauges -} - -func newSummaryHelper(name string, labels ...string) *prometheus.SummaryVec { - labels = append([]string{"pool"}, labels...) 
- opts := prometheus.SummaryOpts{} - opts.Namespace = Namespace - opts.Subsystem = Subsystem - opts.Name = name - opts.Help = name - opts.Objectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.001} - summary := prometheus.NewSummaryVec(opts, labels) - prometheus.MustRegister(summary) - return summary -} - -func newHistogramHelper(name string, labels ...string) *prometheus.HistogramVec { - labels = append([]string{"pool"}, labels...) - opts := prometheus.HistogramOpts{} - opts.Namespace = Namespace - opts.Subsystem = Subsystem - opts.Name = name - opts.Help = name - opts.Buckets = prometheus.ExponentialBuckets(15, 3.5, 7) - histogram := prometheus.NewHistogramVec(opts, labels) - prometheus.MustRegister(histogram) - return histogram -} - -type SizeProvider interface { - Size() (size int64, err error) -} - -type SizeMonitor struct { - redis *RedisInstance - timer *Timer - providers map[string]SizeProvider - - rwmu sync.RWMutex -} - -func NewSizeMonitor(redis *RedisInstance, timer *Timer, preloadData map[string][]string) *SizeMonitor { - m := &SizeMonitor{ - redis: redis, - timer: timer, - providers: make(map[string]SizeProvider), - } - for ns, queues := range preloadData { - for _, q := range queues { - m.MonitorIfNotExist(ns, q) - } - } - return m -} - -func (m *SizeMonitor) Loop() { - for { - time.Sleep(30 * time.Second) - m.collect() - } -} - -func (m *SizeMonitor) MonitorIfNotExist(namespace, queue string) { - qname := fmt.Sprintf("q/%s/%s", namespace, queue) - m.rwmu.RLock() - if m.providers[qname] != nil { // queue and deadletter are monitored together, so only test queue - m.rwmu.RUnlock() - return - } - m.rwmu.RUnlock() - dname := fmt.Sprintf("d/%s/%s", namespace, queue) - m.rwmu.Lock() - m.providers[qname] = NewQueue(namespace, queue, m.redis, nil) - m.providers[dname], _ = NewDeadLetter(namespace, queue, m.redis) - m.rwmu.Unlock() -} - -func (m *SizeMonitor) Remove(namespace, queue string) { - qname := fmt.Sprintf("q/%s/%s", namespace, queue) - dname := fmt.Sprintf("d/%s/%s", namespace, queue) - m.rwmu.Lock() - delete(m.providers, qname) - delete(m.providers, dname) - metrics.queueSizes.DeleteLabelValues(m.redis.Name, namespace, queue) - metrics.deadletterSizes.DeleteLabelValues(m.redis.Name, namespace, queue) - m.rwmu.Unlock() -} - -func (m *SizeMonitor) collect() { - s, err := m.timer.Size() - if err == nil { - metrics.timerSizes.WithLabelValues(m.redis.Name).Set(float64(s)) - } - backupSize, err := m.timer.BackupSize() - if err == nil { - metrics.timerBackupSizes.WithLabelValues(m.redis.Name).Set(float64(backupSize)) - } - m.rwmu.RLock() - for k, p := range m.providers { - s, err := p.Size() - if err != nil { - continue - } - splits := strings.SplitN(k, "/", 3) - switch splits[0] { - case "q": - metrics.queueSizes.WithLabelValues(m.redis.Name, splits[1], splits[2]).Set(float64(s)) - case "d": - metrics.deadletterSizes.WithLabelValues(m.redis.Name, splits[1], splits[2]).Set(float64(s)) - } - } - m.rwmu.RUnlock() -} - -func init() { - setupMetrics() -} diff --git a/engine/redis_v2/pool.go b/engine/redis_v2/pool.go deleted file mode 100644 index 138a2d8..0000000 --- a/engine/redis_v2/pool.go +++ /dev/null @@ -1,83 +0,0 @@ -package redis_v2 - -import ( - "time" - - go_redis "github.com/go-redis/redis/v8" - - "github.com/bitleak/lmstfy/engine" -) - -// Pool stores all the jobs' data. 
this is a global singleton per engine -// note: this `Pool` is NOT the same terminology as the EnginePool -type Pool struct { - redis *RedisInstance -} - -func NewPool(redis *RedisInstance) *Pool { - return &Pool{ - redis: redis, - } -} - -func PoolJobKey(j engine.Job) string { - return join(PoolPrefix, j.Namespace(), j.Queue(), j.ID()) -} - -func PoolJobKey2(namespace, queue, jobID string) string { - return join(PoolPrefix, namespace, queue, jobID) -} - -func PoolJobKeyPrefix(namespace, queue string) string { - return join(PoolPrefix, namespace, queue) -} - -func (p *Pool) Add(j engine.Job) error { - body := j.Body() - metrics.poolAddJobs.WithLabelValues(p.redis.Name).Inc() - - // SetNX return OK(true) if key didn't exist before. - ok, err := p.redis.Conn.SetNX(dummyCtx, PoolJobKey(j), body, time.Duration(j.TTL())*time.Second).Result() - if err != nil { - // Just retry once. - ok, err = p.redis.Conn.SetNX(dummyCtx, PoolJobKey(j), body, time.Duration(j.TTL())*time.Second).Result() - } - if err != nil { - return err - } - if !ok { - return engine.ErrJobExisted // Key existed before, avoid overwriting it, so return error - } - return err -} - -func (p *Pool) Get(namespace, queue, jobID string) (body []byte, ttlSecond uint32, err error) { - pipeline := p.redis.Conn.Pipeline() - jobKey := join(PoolPrefix, namespace, queue, jobID) - getCmd := pipeline.Get(dummyCtx, jobKey) - ttlCmd := pipeline.TTL(dummyCtx, jobKey) - _, err = pipeline.Exec(dummyCtx) - switch err { - case nil: - val := getCmd.Val() - ttl := int64(ttlCmd.Val().Seconds()) - if ttl < 0 { - // Use `0` to identify indefinite TTL, NOTE: in redis ttl=0 is possible when - // the key is not recycled fast enough. but here is okay we use `0` to identify - // indefinite TTL, because we issue GET cmd before TTL cmd, so the ttl must be > 0, - // OR GET cmd would fail. 
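// For instance, a job stored without an expiry (Add called with TTL 0) makes the TTL
// command report -1 here, which this branch folds into ttlSecond == 0 for the caller.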
- ttl = 0 - } - metrics.poolGetJobs.WithLabelValues(p.redis.Name).Inc() - return []byte(val), uint32(ttl), nil - case go_redis.Nil: - return nil, 0, engine.ErrNotFound - default: - return nil, 0, err - } -} - -func (p *Pool) Delete(namespace, queue, jobID string) error { - metrics.poolDeleteJobs.WithLabelValues(p.redis.Name).Inc() - return p.redis.Conn.Del(dummyCtx, join(PoolPrefix, namespace, queue, jobID)).Err() -} diff --git a/engine/redis_v2/pool_test.go b/engine/redis_v2/pool_test.go deleted file mode 100644 index 7fd3558..0000000 --- a/engine/redis_v2/pool_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package redis_v2 - -import ( - "bytes" - "testing" - "time" - - "github.com/bitleak/lmstfy/engine" - "github.com/bitleak/lmstfy/engine/model" - - go_redis "github.com/go-redis/redis/v8" - "github.com/stretchr/testify/assert" - "google.golang.org/protobuf/proto" -) - -func TestPool_Add(t *testing.T) { - p := NewPool(R) - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-pool", - Queue: "q1", - ID: "", - Body: []byte("hello msg 1"), - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - if err := p.Add(job); err != nil { - t.Errorf("Failed to add job to pool: %s", err) - } -} - -// Test TTL -func TestPool_Add2(t *testing.T) { - p := NewPool(R) - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-pool", - Queue: "q2", - ID: "", - Body: []byte("hello msg 2"), - TTL: 1, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - p.Add(job) - time.Sleep(2 * time.Second) - _, err := R.Conn.Get(dummyCtx, PoolJobKey(job)).Result() - if err != go_redis.Nil { - t.Fatalf("Expected to get nil result, but got: %s", err) - } - -} - -func TestPool_Delete(t *testing.T) { - p := NewPool(R) - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-pool", - Queue: "q3", - ID: "", - Body: []byte("hello msg 3"), - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - p.Add(job) - if err := p.Delete(job.Namespace(), job.Queue(), job.ID()); err != nil { - t.Fatalf("Failed to delete job from pool: %s", err) - } -} - -func TestPool_Get(t *testing.T) { - p := NewPool(R) - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-pool", - Queue: "q4", - ID: "", - Body: []byte("hello msg 4"), - TTL: 50, - Delay: 0, - Tries: 1, - Attributes: attributes, - }) - p.Add(job) - body, ttl, err := p.Get(job.Namespace(), job.Queue(), job.ID()) - if err != nil { - t.Fatalf("Failed to get job: %s", err) - } - res := &model.JobData{} - if err = proto.Unmarshal(body, res); err != nil { - t.Fatalf("Failed to unmarshal job: %s", err) - } - if !bytes.Equal(res.GetData(), []byte("hello msg 4")) { - t.Fatal("Mismatched job data") - } - assert.NotNil(t, res.GetAttributes()) - assert.EqualValues(t, "1", res.GetAttributes()["flag"]) - assert.EqualValues(t, "abc", res.GetAttributes()["label"]) - if ttl > 50 || 50-ttl > 2 { - t.Fatalf("Expected TTL is around 50 seconds") - } -} diff --git a/engine/redis_v2/queue.go b/engine/redis_v2/queue.go deleted file mode 100644 index 4b2cbd1..0000000 --- a/engine/redis_v2/queue.go +++ /dev/null @@ -1,263 +0,0 @@ -package redis_v2 - -import ( - "encoding/binary" - "errors" - "fmt" - "time" - - "github.com/bitleak/lmstfy/engine" - go_redis "github.com/go-redis/redis/v8" - "github.com/sirupsen/logrus" -) - -const ( - luaRPOPMultiQueuesScript = ` - for _, queue in ipairs(KEYS) do - local v = redis.call("RPOP", queue) - if v ~= false then - return {queue, v} - end - end - return {"", ""} -` -) - -var rpopMultiQueuesSHA string - -type QueueName struct { - 
Namespace string - Queue string -} - -func (k *QueueName) String() string { - return join(QueuePrefix, k.Namespace, k.Queue) -} - -func (k *QueueName) Decode(str string) error { - splits := splits(3, str) - if len(splits) != 3 || splits[0] != QueuePrefix { - return errors.New("invalid format") - } - k.Namespace = splits[1] - k.Queue = splits[2] - return nil -} - -// Queue is the "ready queue" that has all the jobs that can be consumed right now -type Queue struct { - name QueueName - redis *RedisInstance - timer *Timer - - destroySHA string -} - -func NewQueue(namespace, queue string, redis *RedisInstance, timer *Timer) *Queue { - return &Queue{ - name: QueueName{Namespace: namespace, Queue: queue}, - redis: redis, - timer: timer, - - // NOTE: deadletter and queue are actually the same data structure, we could reuse the lua script - // to empty the redis list (used as queue here). all we need to do is pass the queue name as the - // deadletter name. - destroySHA: deleteDeadletterSHA, - } -} - -func (q *Queue) Name() string { - return q.name.String() -} - -// Push a job into the queue, the job data format: {tries}{job id} -func (q *Queue) Push(j engine.Job) error { - if j.Tries() == 0 { - return nil - } - if j.Namespace() != q.name.Namespace || j.Queue() != q.name.Queue { - // Wrong queue for the job - return engine.ErrWrongQueue - } - metrics.queueDirectPushJobs.WithLabelValues(q.redis.Name).Inc() - val := structPack(j.Tries(), j.ID()) - if err := q.timer.addToBackup(q.name.Namespace, q.name.Queue, j.ID(), j.Tries()); err != nil { - return err - } - return q.redis.Conn.LPush(dummyCtx, q.Name(), val).Err() -} - -// Pop a job. If the tries > 0, add job to the "in-flight" timer with timestamp -// set to `TTR + now()`; Or we might just move the job to "dead-letter". 
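// For example, polling a job that still has tries == 2 with ttrSecond == 30 hands it to
// the consumer with tries decremented to 1 and re-registers it in the timer to fire at
// now()+30; if the job is never ACKed the pump script later pushes it back to the ready
// queue, and once its tries reach 0 it is moved to the dead letter instead.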
-func (q *Queue) Poll(timeoutSecond, ttrSecond uint32) (jobID string, tries uint16, err error) { - _, jobID, tries, err = PollQueues(q.redis, q.timer, []QueueName{q.name}, timeoutSecond, ttrSecond) - return jobID, tries, err -} - -// Return number of the current in-queue jobs -func (q *Queue) Size() (size int64, err error) { - return q.redis.Conn.LLen(dummyCtx, q.name.String()).Result() -} - -// Peek a right-most element in the list without popping it -func (q *Queue) Peek() (jobID string, tries uint16, err error) { - val, err := q.redis.Conn.LIndex(dummyCtx, q.Name(), -1).Result() - switch err { - case nil: - // continue - case go_redis.Nil: - return "", 0, engine.ErrNotFound - default: - return "", 0, err - } - tries, jobID, err = structUnpack(val) - return jobID, tries, err -} - -func (q *Queue) Destroy() (count int64, err error) { - poolPrefix := PoolJobKeyPrefix(q.name.Namespace, q.name.Queue) - var batchSize int64 = 100 - for { - val, err := q.redis.Conn.EvalSha(dummyCtx, q.destroySHA, []string{q.Name(), poolPrefix}, batchSize).Result() - if err != nil { - if isLuaScriptGone(err) { - if err := PreloadDeadLetterLuaScript(q.redis); err != nil { - logger.WithField("err", err).Error("Failed to load deadletter lua script") - } - continue - } - return count, err - } - n, _ := val.(int64) - count += n - if n < batchSize { // Dead letter is empty - break - } - } - return count, nil -} - -func PreloadQueueLuaScript(redis *RedisInstance) error { - sha, err := redis.Conn.ScriptLoad(dummyCtx, luaRPOPMultiQueuesScript).Result() - if err != nil { - return fmt.Errorf("preload rpop multi lua script err: %s", err) - } - rpopMultiQueuesSHA = sha - return nil -} - -func popMultiQueues(redis *RedisInstance, queueNames []string) (string, string, error) { - if len(queueNames) == 1 { - val, err := redis.Conn.RPop(dummyCtx, queueNames[0]).Result() - return queueNames[0], val, err - } - vals, err := redis.Conn.EvalSha(dummyCtx, rpopMultiQueuesSHA, queueNames).Result() - if err != nil && isLuaScriptGone(err) { - if err = PreloadQueueLuaScript(redis); err != nil { - return "", "", err - } - vals, err = redis.Conn.EvalSha(dummyCtx, rpopMultiQueuesSHA, queueNames).Result() - } - if err != nil { - return "", "", err - } - fields, ok := vals.([]interface{}) - if !ok || len(fields) != 2 { - return "", "", errors.New("lua return value should be two elements array") - } - queueName, ok1 := fields[0].(string) - value, ok2 := fields[1].(string) - if !ok1 || !ok2 { - return "", "", errors.New("invalid lua value type") - } - if queueName == "" && value == "" { // queueName and value is empty means rpop without any values - return "", "", go_redis.Nil - } - return queueName, value, nil -} - -// Poll from multiple queues using blocking method; OR pop a job from one queue using non-blocking method -func PollQueues(redis *RedisInstance, timer *Timer, queueNames []QueueName, timeoutSecond, ttrSecond uint32) (queueName *QueueName, jobID string, retries uint16, err error) { - defer func() { - if jobID != "" { - metrics.queuePopJobs.WithLabelValues(redis.Name).Inc() - } - }() - - var val []string - keys := make([]string, len(queueNames)) - for i, k := range queueNames { - keys[i] = k.String() - } - if timeoutSecond > 0 { // Blocking poll - val, err = redis.Conn.BRPop(dummyCtx, time.Duration(timeoutSecond)*time.Second, keys...).Result() - } else { // Non-Blocking fetch - val = make([]string, 2) // Just to be coherent with BRPop return values - val[0], val[1], err = popMultiQueues(redis, keys) - } - switch err { - case nil: - // 
continue - case go_redis.Nil: - logger.Debug("Job not found") - return nil, "", 0, nil - default: - logger.WithField("err", err).Error("Failed to pop job from queue") - return nil, "", 0, err - } - queueName = &QueueName{} - if err := queueName.Decode(val[0]); err != nil { - logger.WithField("err", err).Error("Failed to decode queue name") - return nil, "", 0, err - } - tries, jobID, err := structUnpack(val[1]) - if err != nil { - logger.WithField("err", err).Error("Failed to unpack lua struct data") - return nil, "", 0, err - } - - if tries == 0 { - logger.WithFields(logrus.Fields{ - "jobID": jobID, - "ttr": ttrSecond, - "queue": queueName.String(), - }).Error("Job with tries == 0 appeared") - return nil, "", 0, fmt.Errorf("Job %s with tries == 0 appeared", jobID) - } - tries = tries - 1 - err = timer.Add(queueName.Namespace, queueName.Queue, jobID, ttrSecond, tries) // NOTE: tries is not decreased - if err != nil { - logger.WithFields(logrus.Fields{ - "err": err, - "jobID": jobID, - "ttr": ttrSecond, - "queue": queueName.String(), - }).Error("Failed to add job to timer for ttr") - return queueName, jobID, tries, err - } - return queueName, jobID, tries, nil -} - -// Pack (tries, jobID) into lua struct pack of format "HHHc0", in lua this can be done: -// ```local data = struct.pack("HHc0", tries, #job_id, job_id)``` -func structPack(tries uint16, jobID string) (data string) { - buf := make([]byte, 2+2+len(jobID)) - binary.LittleEndian.PutUint16(buf[0:], tries) - binary.LittleEndian.PutUint16(buf[2:], uint16(len(jobID))) - copy(buf[4:], jobID) - return string(buf) -} - -// Unpack the "HHc0" lua struct format, in lua this can be done: -// ```local tries, job_id = struct.unpack("HHc0", data)``` -func structUnpack(data string) (tries uint16, jobID string, err error) { - buf := []byte(data) - h1 := binary.LittleEndian.Uint16(buf[0:]) - h2 := binary.LittleEndian.Uint16(buf[2:]) - jobID = string(buf[4:]) - tries = h1 - if len(jobID) != int(h2) { - err = errors.New("corrupted data") - } - return -} diff --git a/engine/redis_v2/queue_test.go b/engine/redis_v2/queue_test.go deleted file mode 100644 index 1189ebc..0000000 --- a/engine/redis_v2/queue_test.go +++ /dev/null @@ -1,320 +0,0 @@ -package redis_v2 - -import ( - "fmt" - "strconv" - "testing" - "time" - - "github.com/go-redis/redis/v8" - "github.com/stretchr/testify/assert" - - "github.com/bitleak/lmstfy/engine" -) - -func TestQueue_Push(t *testing.T) { - timer, err := NewTimer("timer_set_q", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer timer.Shutdown() - q := NewQueue("ns-queue", "q1", R, timer) - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-queue", - Queue: "q1", - ID: "", - Body: []byte("hello msg 1"), - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - if err := q.Push(job); err != nil { - t.Fatalf("Failed to push job into queue: %s", err) - } - - job2 := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-queue", - Queue: "q2", - ID: "", - Body: []byte("hello msg 1"), - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - - if err := q.Push(job2); err != engine.ErrWrongQueue { - t.Fatalf("Expected to get wrong queue error, but got: %s", err) - } -} - -func TestQueue_Poll(t *testing.T) { - timer, err := NewTimer("timer_set_q", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer timer.Shutdown() - q := NewQueue("ns-queue", "q2", R, timer) - job := 
engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-queue", - Queue: "q2", - ID: "", - Body: []byte("hello msg 2"), - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - go func() { - time.Sleep(time.Second) - q.Push(job) - }() - jobID, _, err := q.Poll(2, 1) - if err != nil || jobID == "" { - t.Fatalf("Failed to poll job from queue: %s", err) - } - if job.ID() != jobID { - t.Fatal("Mismatched job") - } -} - -func TestQueue_Peek(t *testing.T) { - timer, err := NewTimer("timer_set_q", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer timer.Shutdown() - q := NewQueue("ns-queue", "q3", R, timer) - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-queue", - Queue: "q3", - ID: "", - Body: []byte("hello msg 3"), - TTL: 10, - Delay: 0, - Tries: 2, - Attributes: nil, - }) - q.Push(job) - jobID, tries, err := q.Peek() - if err != nil || jobID == "" || tries != 2 { - t.Fatalf("Failed to peek job from queue: %s", err) - } - if job.ID() != jobID { - t.Fatal("Mismatched job") - } -} - -func TestQueue_Destroy(t *testing.T) { - timer, err := NewTimer("timer_set_q", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer timer.Shutdown() - q := NewQueue("ns-queue", "q4", R, timer) - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-queue", - Queue: "q4", - ID: "", - Body: []byte("hello msg 4"), - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - q.Push(job) - count, err := q.Destroy() - if err != nil { - t.Fatalf("Failed to destroy queue: %s", err) - } - if count != 1 { - t.Fatalf("Mismatched deleted jobs count") - } - size, _ := q.Size() - if size != 0 { - t.Fatalf("Destroyed queue should be of size 0") - } -} - -func TestQueue_Tries(t *testing.T) { - timer, err := NewTimer("timer_set_q", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer timer.Shutdown() - namespace := "ns-queue" - queue := "q5" - q := NewQueue(namespace, queue, R, timer) - var maxTries uint16 = 2 - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: namespace, - Queue: queue, - ID: "", - Body: []byte("hello msg 5"), - TTL: 30, - Delay: 0, - Tries: maxTries, - Attributes: nil, - }) - q.Push(job) - pool := NewPool(R) - pool.Add(job) - jobID, tries, err := q.Poll(2, 1) - if err != nil || jobID == "" { - t.Fatalf("Failed to poll job from queue: %s", err) - } - if tries != (maxTries - 1) { - t.Fatalf("Expected to get tries 1 , but got " + strconv.Itoa(int(tries))) - } - if job.ID() != jobID { - t.Fatal("Mismatched job") - } - jobID, tries, err = q.Poll(5, 1) - if err != nil || jobID == "" { - t.Fatalf("Failed to poll job from queue: %s", err) - } - if tries != (maxTries - 2) { - t.Fatalf("Expected to get tries 0 , but got " + strconv.Itoa(int(tries))) - } - if job.ID() != jobID { - t.Fatal("Mismatched job") - } -} - -func TestStructPacking(t *testing.T) { - var tries uint16 = 23 - jobID := " a test ID#" - data := structPack(tries, jobID) - tries2, jobID2, err := structUnpack(data) - if err != nil { - t.Fatal("Failed to unpack") - } - if tries != tries2 || jobID != jobID2 { - t.Fatal("Mismatched unpack data") - } -} - -func TestPopMultiQueues(t *testing.T) { - timer, err := NewTimer("timer_set_q", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer timer.Shutdown() - - namespace := "ns-queueName" - queues := 
make([]QueueName, 3) - queueNames := make([]string, 3) - for i, queueName := range []string{"q6", "q7", "q8"} { - queues[i] = QueueName{Namespace: namespace, Queue: queueName} - queueNames[i] = queues[i].String() - } - gotQueueName, gotVal, err := popMultiQueues(R, queueNames) - if err != redis.Nil { - t.Fatalf("redis nil err was expected, but got %s", err.Error()) - } - if gotQueueName != "" || gotVal != "" || err != redis.Nil { - t.Fatal("queueName name and value should be empty") - } - - queueName := "q7" - q := NewQueue(namespace, queueName, R, timer) - msg := "hello msg 7" - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: namespace, - Queue: queueName, - ID: "", - Body: []byte(msg), - TTL: 30, - Delay: 0, - Tries: 2, - Attributes: nil, - }) - - q.Push(job) - gotQueueName, gotVal, err = popMultiQueues(R, queueNames) - if err != nil { - t.Fatalf("redis nil err was expected, but got %s", err.Error()) - } - if gotQueueName != q.Name() { - t.Fatalf("invalid queueName name, %s was expected but got %s", q.Name(), gotQueueName) - } - - // single queue condition - queueName = "q8" - job = engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: namespace, - Queue: queueName, - ID: "", - Body: []byte(msg), - TTL: 30, - Delay: 0, - Tries: 2, - Attributes: nil, - }) - q = NewQueue(namespace, queueName, R, timer) - q.Push(job) - gotQueueName, gotVal, err = popMultiQueues(R, []string{queueNames[2]}) - if err != nil { - t.Fatalf("redis nil err was expected, but got %s", err.Error()) - } - if gotQueueName != q.Name() { - t.Fatalf("invalid queueName name, %s was expected but got %s", q.Name(), gotQueueName) - } -} - -func TestQueue_Backup(t *testing.T) { - timer, err := NewTimer("timer_set_for_test_backup", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer timer.Shutdown() - - namespace := "ns-queue" - queue := "q9" - q := NewQueue(namespace, queue, R, timer) - count := 10 - for i := 0; i < count; i++ { - delay := uint32(0) - if i%2 == 0 { - delay = 1 - } - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: namespace, - Queue: queue, - ID: "", - Body: []byte("hello msg"), - TTL: 30, - Delay: delay, - Tries: 2, - Attributes: nil, - }) - q.Push(job) - pool := NewPool(R) - pool.Add(job) - } - backupName := timer.BackupName() - memberScores, err := q.redis.Conn.ZRangeWithScores(dummyCtx, backupName, 0, -1).Result() - assert.Nil(t, err) - now := time.Now().Unix() - for _, memberScore := range memberScores { - gotNamespace, gotQueue, gotJobID, err := structUnpackTimerData([]byte(memberScore.Member.(string))) - assert.Nil(t, err) - assert.Equal(t, namespace, gotNamespace) - assert.Equal(t, queue, gotQueue) - assert.Equal(t, 26, len(gotJobID)) - timestamp, tries := decodeScore(memberScore.Score) - assert.Equal(t, uint16(2), tries) - assert.LessOrEqual(t, timestamp, now) - } - - for i := 0; i < 10; i++ { - jobID, _, err := q.Poll(2, 1) - if err != nil || jobID == "" { - t.Fatalf("Failed to poll job from queue: %s", err) - } - } - backupCount, err := q.redis.Conn.ZCard(dummyCtx, backupName).Result() - assert.Nil(t, err) - assert.Equal(t, int64(0), backupCount) -} diff --git a/engine/redis_v2/setup.go b/engine/redis_v2/setup.go deleted file mode 100644 index a27e235..0000000 --- a/engine/redis_v2/setup.go +++ /dev/null @@ -1,70 +0,0 @@ -package redis_v2 - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/bitleak/lmstfy/storage" - "github.com/go-redis/redis/v8" - "github.com/sirupsen/logrus" - - 
"github.com/bitleak/lmstfy/config" - "github.com/bitleak/lmstfy/engine" - "github.com/bitleak/lmstfy/helper" -) - -const ( - MaxRedisConnections = 5000 - VersionV2 = "v2" - - maxPumpInThresholdSeconds = 24 * 60 * 60 -) - -var ( - logger *logrus.Logger - dummyCtx = context.TODO() -) - -// SetLogger will set the logger for engine -func SetLogger(l *logrus.Logger) { - logger = l -} - -// Setup set the essential config of redis engine -func Setup(conf *config.Config) error { - for name, poolConf := range conf.Pool { - // Only register v2 engine when the version is explicitly specified as "v2" - if !strings.EqualFold(poolConf.Version, VersionV2) { - continue - } - - if poolConf.PoolSize == 0 { - poolConf.PoolSize = MaxRedisConnections - } - opt := &redis.Options{} - // By Default, the timeout for RW is 3 seconds, we might get few error - // when redis server is doing AOF rewrite. We prefer data integrity over speed. - opt.ReadTimeout = 30 * time.Second - opt.WriteTimeout = 30 * time.Second - opt.MinIdleConns = 10 - cli := helper.NewRedisClient(&poolConf, opt) - if cli.Ping(dummyCtx).Err() != nil { - return fmt.Errorf("redis server %s was not alive", poolConf.Addr) - } - e, err := NewEngine(name, &poolConf, cli) - if err != nil { - return fmt.Errorf("setup engine error: %s", err) - } - engine.Register(engine.KindRedisV2, name, e) - if poolConf.EnableSecondaryStorage && conf.HasSecondaryStorage() { - threshold := poolConf.SecondaryStorageThresholdSeconds / 3 - if threshold >= maxPumpInThresholdSeconds { - threshold = maxPumpInThresholdSeconds - } - storage.Get().AddPool(name, e, threshold) - } - } - return nil -} diff --git a/engine/redis_v2/setup_test.go b/engine/redis_v2/setup_test.go deleted file mode 100644 index f4fe75d..0000000 --- a/engine/redis_v2/setup_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package redis_v2 - -import ( - "fmt" - "os" - "testing" - - "github.com/bitleak/lmstfy/config" - "github.com/bitleak/lmstfy/helper" - "github.com/bitleak/lmstfy/log" - "github.com/sirupsen/logrus" -) - -var ( - R *RedisInstance - testConfig *config.PresetConfigForTest -) - -func setup(CONF *config.Config) { - logger = logrus.New() - level, _ := logrus.ParseLevel(CONF.LogLevel) - logger.SetLevel(level) - - log.Setup(CONF.LogFormat, CONF.LogDir, CONF.LogLevel, "ERROR") - poolConf := CONF.Pool["default"] - conn := helper.NewRedisClient(&poolConf, nil) - err := conn.Ping(dummyCtx).Err() - if err != nil { - panic(fmt.Sprintf("Failed to ping: %s", err)) - } - err = conn.FlushDB(dummyCtx).Err() - if err != nil { - panic(fmt.Sprintf("Failed to flush db: %s", err)) - } - - R = &RedisInstance{ - Name: "unittest", - Conn: conn, - } - - if err = PreloadDeadLetterLuaScript(R); err != nil { - panic(fmt.Sprintf("Failed to preload deadletter lua script: %s", err)) - } -} - -func TestMain(m *testing.M) { - presetConfig, err := config.CreatePresetForTest(VersionV2) - if err != nil { - panic(fmt.Sprintf("CreatePresetForTest failed with error: %s", err)) - } - defer presetConfig.Destroy() - setup(presetConfig.Config) - testConfig = presetConfig - ret := m.Run() - os.Exit(ret) -} diff --git a/engine/redis_v2/timer.go b/engine/redis_v2/timer.go deleted file mode 100644 index 803bdc2..0000000 --- a/engine/redis_v2/timer.go +++ /dev/null @@ -1,344 +0,0 @@ -package redis_v2 - -import ( - "encoding/binary" - "errors" - "math" - "strings" - "time" - - "github.com/go-redis/redis/v8" -) - -const ( - luaPumpBackupScript = ` -local zset_key = KEYS[1] -local queue_prefix = KEYS[2] -local pool_prefix = KEYS[3] -local new_score = 
tonumber(ARGV[1]) -local max_score = ARGV[2] -local limit = ARGV[3] -local backup_key = table.concat({zset_key, "backup"}, "/") - -local memberScores = redis.call("ZRANGEBYSCORE", backup_key, 0, max_score, "WITHSCORES", "LIMIT", 0, limit) - -if #memberScores == 0 then - return 0 -end - -local toBeRemovedMembers = {} -for i = 1, #memberScores, 2 do - local member = memberScores[i] - local score = tonumber(memberScores[i+1]) - local ns, q, job_id = struct.unpack("Hc0Hc0Hc0", member) - local need_next_check = true - -- ignore jobs which are expired or ACKed - if redis.call("EXISTS", table.concat({pool_prefix, ns, q, job_id}, "/")) == 0 then - -- the job was expired or acked, just discard it - table.insert(toBeRemovedMembers, member) - need_next_check = false - end - - local oldest_elem = nil - if need_next_check then - oldest_elem = redis.call("LINDEX", table.concat({queue_prefix, ns, q}, "/"), "-1") - if not oldest_elem then - -- no elem in the queue means those queue jobs in backup are lost, - -- since consumer can't fetch them. - table.insert(toBeRemovedMembers, member) - redis.call("ZADD", zset_key, score, member) - need_next_check = false - end - end - - if need_next_check then - -- the score in backup is updated by queuing time, so it means those jobs - -- should be consumed since they are older than first element in ready queue - local tries, oldest_job_id = struct.unpack("HHc0", oldest_elem) - local oldest_member = struct.pack("Hc0Hc0Hc0", #ns, ns, #q, q, #oldest_job_id, oldest_job_id) - local oldest_score = redis.call("ZSCORE", backup_key, oldest_member) - if oldest_score and tonumber(oldest_score) > score then - table.insert(toBeRemovedMembers, member) - local tries = bit.band(score, 0xffff) - redis.call("ZADD", zset_key, new_score+tries, member) - end - end -end - -if #toBeRemovedMembers > 0 then - redis.call("ZREM", backup_key, unpack(toBeRemovedMembers)) -end -return #toBeRemovedMembers -` - luaPumpQueueScript = ` -local zset_key = KEYS[1] -local output_queue_prefix = KEYS[2] -local pool_prefix = KEYS[3] -local output_deadletter_prefix = KEYS[4] -local max_score= ARGV[1] -local limit = ARGV[2] - -local backup_key = table.concat({zset_key, "backup"}, "/") -local expiredMembers = redis.call("ZRANGEBYSCORE", zset_key, 0, max_score, "WITHSCORES", "LIMIT", 0, limit) - -if #expiredMembers == 0 then - return 0 -end - --- we want to remove those members after pumping into ready queue, --- so need a new array to record members without score. 
-local toBeRemovedMembers = {} -for i = 1, #expiredMembers, 2 do - local v = expiredMembers[i] - table.insert(toBeRemovedMembers, v) - local score = tonumber(expiredMembers[i+1]) - local tries = bit.band(score, 0xffff) - local ns, q, job_id = struct.unpack("Hc0Hc0Hc0", v) - if redis.call("EXISTS", table.concat({pool_prefix, ns, q, job_id}, "/")) > 0 then - -- only pump job to ready queue/dead letter if the job did not expire - if tries == 0 then - -- no more tries, move to dead letter - local val = struct.pack("HHc0", 1, #job_id, job_id) - redis.call("PERSIST", table.concat({pool_prefix, ns, q, job_id}, "/")) -- remove ttl - redis.call("LPUSH", table.concat({output_deadletter_prefix, ns, q}, "/"), val) - redis.call("ZREM", backup_key, v) - else - -- move to ready queue - local val = struct.pack("HHc0", tonumber(tries), #job_id, job_id) - redis.call("LPUSH", table.concat({output_queue_prefix, ns, q}, "/"), val) - redis.call("ZADD", backup_key, score, v) - end - else - -- the job was expired or acked, just discard it - redis.call("ZREM", backup_key, v) - end -end -redis.call("ZREM", zset_key, unpack(toBeRemovedMembers)) -return #toBeRemovedMembers -` -) - -// Timer is the other way of saying "delay queue". timer kick jobs into ready queue when -// it's ready. -type Timer struct { - name string - redis *RedisInstance - interval time.Duration - checkBackupInterval time.Duration - shutdown chan struct{} - - pumpSHA string - pumpBackupSHA string -} - -// NewTimer return an instance of delay queue -func NewTimer(name string, redis *RedisInstance, interval, checkBackupInterval time.Duration) (*Timer, error) { - timer := &Timer{ - name: name, - redis: redis, - interval: interval, - checkBackupInterval: checkBackupInterval, - shutdown: make(chan struct{}), - } - - // Preload the lua scripts - sha, err := redis.Conn.ScriptLoad(dummyCtx, luaPumpQueueScript).Result() - if err != nil { - logger.WithField("err", err).Error("Failed to preload lua script in timer") - return nil, err - } - timer.pumpSHA = sha - - backupSHA, err := redis.Conn.ScriptLoad(dummyCtx, luaPumpBackupScript).Result() - if err != nil { - logger.WithField("err", err).Error("Failed to preload lua script in timer") - return nil, err - } - timer.pumpBackupSHA = backupSHA - - go timer.tick() - return timer, nil -} - -func (t *Timer) Name() string { - return t.name -} - -// encodeScore will encode timestamp(unix second) and tries as score. -// Be careful that the underlay of Lua number is double, so it will -// lose precious if overrun the 53bit. 
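// For example, encodeScore(1_700_000_000, 3) yields float64(1_700_000_000<<16 | 3);
// 32 bits of timestamp plus 16 bits of tries occupy 48 bits in total, which still fits
// exactly in the 53-bit integer range of an IEEE-754 double (Lua's number type), and
// decodeScore recovers the original pair:
//   ts, tries := decodeScore(encodeScore(1_700_000_000, 3)) // ts == 1_700_000_000, tries == 3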
-func encodeScore(timestamp int64, tries uint16) float64 { - return float64(((timestamp & 0xffffffff) << 16) | int64(tries&0xffff)) -} - -// decodeScore will decode the score into timestamp and tries -func decodeScore(score float64) (int64, uint16) { - val := int64(score) - timestamp := (val >> 16) & 0xffffffff - tries := uint16(val & 0xffff) - return timestamp, tries -} - -// structPackTimerData will struct-pack the data in the format `Hc0Hc0Hc0`: -// -// {namespace len}{namespace}{queue len}{queue}{jobID len}{jobID} -// -// length are 2-byte uint16 in little-endian -func structPackTimerData(namespace, queue, jobID string) []byte { - namespaceLen := len(namespace) - queueLen := len(queue) - jobIDLen := len(jobID) - buf := make([]byte, 2+namespaceLen+2+queueLen+2+jobIDLen) - binary.LittleEndian.PutUint16(buf[0:], uint16(namespaceLen)) - copy(buf[2:], namespace) - binary.LittleEndian.PutUint16(buf[2+namespaceLen:], uint16(queueLen)) - copy(buf[2+namespaceLen+2:], queue) - binary.LittleEndian.PutUint16(buf[2+namespaceLen+2+queueLen:], uint16(jobIDLen)) - copy(buf[2+namespaceLen+2+queueLen+2:], jobID) - return buf -} - -func structUnpackTimerData(data []byte) (namespace, queue, jobID string, err error) { - namespaceLen := binary.LittleEndian.Uint16(data) - namespace = string(data[2 : namespaceLen+2]) - queueLen := binary.LittleEndian.Uint16(data[2+namespaceLen:]) - queue = string(data[2+namespaceLen+2 : 2+namespaceLen+2+queueLen]) - JobIDLen := binary.LittleEndian.Uint16(data[2+namespaceLen+2+queueLen:]) - jobID = string(data[2+namespaceLen+2+queueLen+2:]) - if len(jobID) != int(JobIDLen) { - return "", "", "", errors.New("corrupted data") - } - return -} - -func (t *Timer) Add(namespace, queue, jobID string, delaySecond uint32, tries uint16) error { - metrics.timerAddJobs.WithLabelValues(t.redis.Name).Inc() - timestamp := time.Now().Unix() + int64(delaySecond) - - score := encodeScore(timestamp, tries) - member := structPackTimerData(namespace, queue, jobID) - err := t.redis.Conn.ZAdd(dummyCtx, t.Name(), &redis.Z{Score: score, Member: member}).Err() - if err != nil { - return err - } - - // We can ignore the error when removing job id from the backup queue - // coz it harms nothing even respawn them into the timer set. 
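// If this ZREM ever fails, the stale backup entry is harmless: at worst pumpBackup
// re-adds it to the timer later, so the job may be delivered an extra time rather than
// being lost.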
- _ = t.removeFromBackup(namespace, queue, jobID) - return nil -} - -func (t *Timer) BackupName() string { - return strings.Join([]string{t.Name(), "backup"}, "/") -} - -func (t *Timer) addToBackup(namespace, queue, jobID string, tries uint16) error { - score := encodeScore(time.Now().Unix(), tries) - member := structPackTimerData(namespace, queue, jobID) - return t.redis.Conn.ZAdd(dummyCtx, t.BackupName(), &redis.Z{Score: score, Member: member}).Err() -} - -func (t *Timer) removeFromBackup(namespace, queue, jobID string) error { - member := structPackTimerData(namespace, queue, jobID) - metrics.timerRemoveBackupJobs.WithLabelValues(t.redis.Name).Inc() - return t.redis.Conn.ZRem(dummyCtx, t.BackupName(), member).Err() -} - -// Tick pump all due jobs to the target queue -func (t *Timer) tick() { - tick := time.NewTicker(t.interval) - checkBackupTicker := time.NewTicker(t.checkBackupInterval) - for { - select { - case now := <-tick.C: - currentSecond := now.Unix() - t.pump(currentSecond) - case now := <-checkBackupTicker.C: - t.pumpBackup(now.Unix()) - case <-t.shutdown: - return - } - } -} - -func (t *Timer) pumpBackup(currentSecond int64) { - maxScore := encodeScore(currentSecond-int64(t.checkBackupInterval.Seconds()), math.MaxUint16) - newScore := encodeScore(currentSecond, 0) - val, err := t.redis.Conn.EvalSha(dummyCtx, t.pumpBackupSHA, - []string{t.Name(), QueuePrefix, PoolPrefix}, - newScore, maxScore, BatchSize, - ).Result() - - if err != nil { - if isLuaScriptGone(err) { // when redis restart, the script needs to be uploaded again - sha, err := t.redis.Conn.ScriptLoad(dummyCtx, luaPumpBackupScript).Result() - if err != nil { - logger.WithField("err", err).Error("Failed to reload script") - time.Sleep(time.Second) - return - } - t.pumpBackupSHA = sha - } - logger.WithField("err", err).Error("Failed to pump") - - val, err = t.redis.Conn.EvalSha(dummyCtx, t.pumpBackupSHA, - []string{t.Name(), QueuePrefix, PoolPrefix}, - newScore, maxScore, BatchSize, - ).Result() - if err != nil { - logger.WithField("err", err).Error("Failed to pump") - return - } - } - n, _ := val.(int64) - if n > 0 { - logger.WithField("count", n).Warn("Find lost jobs") - } -} - -func (t *Timer) pump(currentSecond int64) { - maxScore := encodeScore(currentSecond, math.MaxUint16) - for { - val, err := t.redis.Conn.EvalSha(dummyCtx, t.pumpSHA, - []string{t.Name(), QueuePrefix, PoolPrefix, DeadLetterPrefix}, - maxScore, BatchSize, - ).Result() - - if err != nil { - if isLuaScriptGone(err) { // when redis restart, the script needs to be uploaded again - sha, err := t.redis.Conn.ScriptLoad(dummyCtx, luaPumpQueueScript).Result() - if err != nil { - logger.WithField("err", err).Error("Failed to reload script") - time.Sleep(time.Second) - return - } - t.pumpSHA = sha - } - logger.WithField("err", err).Error("Failed to pump") - time.Sleep(time.Second) - return - } - n, _ := val.(int64) - logger.WithField("count", n).Debug("Due jobs") - metrics.timerDueJobs.WithLabelValues(t.redis.Name).Add(float64(n)) - if n == BatchSize { - // There might have more expired jobs to pump - metrics.timerFullBatches.WithLabelValues(t.redis.Name).Inc() - time.Sleep(10 * time.Millisecond) // Hurry up! 
accelerate pumping the due jobs - continue - } - return - } -} - -func (t *Timer) Shutdown() { - close(t.shutdown) -} - -func (t *Timer) Size() (size int64, err error) { - return t.redis.Conn.ZCard(dummyCtx, t.name).Result() -} - -func (t *Timer) BackupSize() (int64, error) { - return t.redis.Conn.ZCard(dummyCtx, t.BackupName()).Result() -} diff --git a/engine/redis_v2/timer_test.go b/engine/redis_v2/timer_test.go deleted file mode 100644 index e17d9a1..0000000 --- a/engine/redis_v2/timer_test.go +++ /dev/null @@ -1,308 +0,0 @@ -package redis_v2 - -import ( - "fmt" - "strconv" - "testing" - "time" - - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/bitleak/lmstfy/engine" -) - -func TestTimer_Add(t *testing.T) { - timer, err := NewTimer("timer_set_1", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-timer", - Queue: "q1", - ID: "", - Body: []byte("hello msg 1"), - TTL: 10, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - if err = timer.Add(job.Namespace(), job.Queue(), job.ID(), 10, 1); err != nil { - t.Errorf("Failed to add job to timer: %s", err) - } -} - -func TestTimer_Tick(t *testing.T) { - timer, err := NewTimer("timer_set_2", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer timer.Shutdown() - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-timer", - Queue: "q2", - ID: "", - Body: []byte("hello msg 2"), - TTL: 5, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - pool := NewPool(R) - pool.Add(job) - timer.Add(job.Namespace(), job.Queue(), job.ID(), 3, 1) - errChan := make(chan error, 1) - go func() { - var err error = nil - defer func() { - // BRPop could panic - if r := recover(); r != nil { - err = fmt.Errorf("recover with panic %v", r) - } - errChan <- err - }() - val, err := R.Conn.BRPop(dummyCtx, 5*time.Second, join(QueuePrefix, "ns-timer", "q2")).Result() - if err != nil || len(val) == 0 { - err = fmt.Errorf("Failed to pop the job from target queue") - return - } - tries, jobID, err := structUnpack(val[1]) - if err != nil { - err = fmt.Errorf("Failed to decode the job pop from queue") - return - } - if tries != 1 || jobID != job.ID() { - err = fmt.Errorf("Job data mismatched") - return - } - }() - err = <-errChan - if err != nil { - t.Error(err) - } -} - -func TestBackupTimer_BeforeOldestScore(t *testing.T) { - timer, err := NewTimer("test_backup_before_oldest_score", R, time.Second, time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer timer.Shutdown() - - pool := NewPool(R) - ns := "ns-test-backup" - queueName := "q0" - queue := NewQueue(ns, queueName, R, timer) - count := 10 - for i := 0; i < count; i++ { - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: ns, - Queue: queueName, - ID: "", - Body: []byte("hello msg" + strconv.Itoa(i)), - TTL: 100, - Delay: 1, - Tries: 3, - Attributes: nil, - }) - pool.Add(job) - if i%2 == 0 { - queue.Push(job) - } else { - timer.Add(job.Namespace(), job.Queue(), job.ID(), job.Delay(), job.Tries()) - } - } - // make sure all jobs are in the ready queue and consume them without ACK, - // so they should appear in the backup queue. 
- time.Sleep(2 * time.Second) - assert.Equal(t, int64(count), R.Conn.ZCard(dummyCtx, timer.BackupName()).Val()) - for i := 0; i < count/2; i++ { - _, err := R.Conn.BRPop(dummyCtx, time.Second, queue.Name()).Result() - assert.Nil(t, err) - } - require.Equal(t, int64(count)/2, R.Conn.LLen(dummyCtx, queue.Name()).Val()) - time.Sleep(3 * time.Second) - // all jobs should requeue in ready queue again - require.Equal(t, int64(count), R.Conn.LLen(dummyCtx, queue.Name()).Val()) - require.Equal(t, int64(count), R.Conn.ZCard(dummyCtx, timer.BackupName()).Val()) - - for i := 0; i < count; i++ { - val, err := R.Conn.BRPop(dummyCtx, time.Second, queue.Name()).Result() - assert.Nil(t, err) - tries, jobID, err := structUnpack(val[1]) - assert.Nil(t, err) - assert.Equal(t, uint16(3), tries) - assert.Nil(t, pool.Delete(ns, queueName, jobID)) - } - // backup jobs should be disappeared after jobs were ACKed - time.Sleep(2 * time.Second) - require.Equal(t, int64(0), R.Conn.LLen(dummyCtx, queue.Name()).Val()) - require.Equal(t, int64(0), R.Conn.ZCard(dummyCtx, t.Name()).Val()) - require.Equal(t, int64(0), R.Conn.ZCard(dummyCtx, timer.BackupName()).Val()) -} - -func TestBackupTimer_EmptyReadyQueue(t *testing.T) { - timer, err := NewTimer("test_backup_timer_set", R, time.Second, time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer timer.Shutdown() - - pool := NewPool(R) - ns := "ns-test-backup" - queueName := "q1" - queue := NewQueue(ns, queueName, R, timer) - count := 10 - for i := 0; i < count; i++ { - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: ns, - Queue: queueName, - ID: "", - Body: []byte("hello msg" + strconv.Itoa(i)), - TTL: 100, - Delay: 1, - Tries: 3, - Attributes: nil, - }) - - pool.Add(job) - if i%2 == 0 { - queue.Push(job) - } else { - timer.Add(job.Namespace(), job.Queue(), job.ID(), job.Delay(), job.Tries()) - } - } - // make sure all jobs are in the ready queue and consume them without ACK, - // so they should appear in the backup queue. 
- time.Sleep(time.Second) - assert.Equal(t, int64(count), R.Conn.ZCard(dummyCtx, timer.BackupName()).Val()) - for i := 0; i < count; i++ { - _, err := R.Conn.BRPop(dummyCtx, time.Second, queue.Name()).Result() - assert.Nil(t, err) - } - time.Sleep(2 * time.Second) - // all jobs should requeue in ready queue - require.Equal(t, int64(count), R.Conn.LLen(dummyCtx, queue.Name()).Val()) - require.Equal(t, int64(count), R.Conn.ZCard(dummyCtx, timer.BackupName()).Val()) - - for i := 0; i < count; i++ { - val, err := R.Conn.BRPop(dummyCtx, time.Second, queue.Name()).Result() - assert.Nil(t, err) - tries, jobID, err := structUnpack(val[1]) - assert.Nil(t, err) - assert.Equal(t, uint16(3), tries) - assert.Nil(t, pool.Delete(ns, queueName, jobID)) - } - // backup jobs should be disappeared after jobs were ACKed - time.Sleep(2 * time.Second) - require.Equal(t, int64(0), R.Conn.LLen(dummyCtx, queue.Name()).Val()) - require.Equal(t, int64(0), R.Conn.ZCard(dummyCtx, t.Name()).Val()) - require.Equal(t, int64(0), R.Conn.ZCard(dummyCtx, timer.BackupName()).Val()) -} - -func BenchmarkTimer(b *testing.B) { - // Disable logging temporarily - logger.SetLevel(logrus.ErrorLevel) - defer logger.SetLevel(logrus.DebugLevel) - - t, err := NewTimer("timer_set_3", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - defer t.Shutdown() - b.Run("Add", benchmarkTimer_Add(t)) - - b.Run("Pop", benchmarkTimer_Pop(t)) -} - -func benchmarkTimer_Add(timer *Timer) func(b *testing.B) { - pool := NewPool(R) - return func(b *testing.B) { - for i := 0; i < b.N; i++ { - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-timer", - Queue: "q3", - ID: "", - Body: []byte("hello msg 1"), - TTL: 100, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - pool.Add(job) - timer.Add(job.Namespace(), job.Queue(), job.ID(), 1, 1) - } - } -} - -func benchmarkTimer_Pop(timer *Timer) func(b *testing.B) { - return func(b *testing.B) { - key := join(QueuePrefix, "ns-timer", "q3") - b.StopTimer() - pool := NewPool(R) - for i := 0; i < b.N; i++ { - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-timer", - Queue: "q3", - ID: "", - Body: []byte("hello msg 1"), - TTL: 100, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - pool.Add(job) - timer.Add(job.Namespace(), job.Queue(), job.ID(), 1, 1) - } - b.StartTimer() - for i := 0; i < b.N; i++ { - R.Conn.BRPop(dummyCtx, 5*time.Second, key) - } - } -} - -// How long did it take to fire 10000 due jobs -func BenchmarkTimer_Pump(b *testing.B) { - // Disable logging temporarily - logger.SetLevel(logrus.ErrorLevel) - defer logger.SetLevel(logrus.DebugLevel) - - b.StopTimer() - - pool := NewPool(R) - timer, err := NewTimer("timer_set_4", R, time.Second, 600*time.Second) - if err != nil { - panic(fmt.Sprintf("Failed to new timer: %s", err)) - } - timer.Shutdown() - for i := 0; i < 10000; i++ { - job := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: "ns-timer", - Queue: "q4", - ID: "", - Body: []byte("hello msg 2"), - TTL: 5, - Delay: 0, - Tries: 1, - Attributes: nil, - }) - pool.Add(job) - timer.Add(job.Namespace(), job.Queue(), job.ID(), 1, 1) - } - - b.StartTimer() - timer.pump(time.Now().Unix() + 1) -} - -func TestScore_Encode(t *testing.T) { - now := time.Now().Unix() - tries := uint16(123) - - for i := 0; i < 1000; i++ { - score := encodeScore(now+int64(i), tries) - gotTimestamp, gotTries := decodeScore(score) - require.Equal(t, tries, gotTries) - require.Equal(t, now+int64(i), gotTimestamp) - } -} diff --git 
a/engine/redis_v2/utils.go b/engine/redis_v2/utils.go deleted file mode 100644 index 6df6c6e..0000000 --- a/engine/redis_v2/utils.go +++ /dev/null @@ -1,15 +0,0 @@ -package redis_v2 - -import "strings" - -func join(args ...string) string { - return strings.Join(args, "/") -} - -func splits(n int, s string) []string { - return strings.SplitN(s, "/", n) -} - -func isLuaScriptGone(err error) bool { - return strings.HasPrefix(err.Error(), "NOSCRIPT") -} diff --git a/go.mod b/go.mod index 89a1e1c..6bbff1c 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/bitleak/lmstfy go 1.17 require ( - cloud.google.com/go/spanner v1.27.0 github.com/BurntSushi/toml v0.3.1 github.com/gin-gonic/gin v1.6.3 github.com/go-redis/redis/v8 v8.11.4 @@ -17,20 +16,11 @@ require ( github.com/spf13/viper v1.9.0 github.com/stretchr/testify v1.7.1 go.uber.org/automaxprocs v1.5.1 - google.golang.org/api v0.66.0 - google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44 - google.golang.org/grpc v1.42.0 + google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44 // indirect + google.golang.org/grpc v1.42.0 // indirect ) require ( - github.com/go-redsync/redsync/v4 v4.5.1 - go.uber.org/atomic v1.9.0 - google.golang.org/protobuf v1.27.1 -) - -require ( - cloud.google.com/go v0.100.2 // indirect - cloud.google.com/go/compute v0.1.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio v0.5.0 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -49,13 +39,9 @@ require ( github.com/go-playground/validator/v10 v10.8.0 // indirect github.com/go-redis/redis/v7 v7.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.7 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/gax-go/v2 v2.1.1 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect - github.com/hashicorp/go-multierror v1.1.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -80,17 +66,15 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.2.0 // indirect github.com/ugorji/go/codec v1.1.7 // indirect - go.opencensus.io v0.23.0 // indirect + go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.19.1 // indirect golang.org/x/crypto v0.1.0 // indirect golang.org/x/net v0.1.0 // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect golang.org/x/sys v0.1.0 // indirect golang.org/x/text v0.4.0 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.27.1 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/ini.v1 v1.63.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 27ebf36..03f470b 100644 --- a/go.sum +++ b/go.sum @@ -24,19 +24,12 @@ cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSU cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= 
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0 h1:rSUBvAyVwNJ5uQCKNJFMwPtTvJkfN38b6Pvb9zZoqJ8= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= @@ -44,8 +37,6 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/spanner v1.27.0 h1:F5CmUC0pc83kigWjG3YfXfV02+ip3G394jLYzbDtoEQ= -cloud.google.com/go/spanner v1.27.0/go.mod h1:/YWC/deZAtNGNmUXUJBjzcprotUO8q0q0AwPISFncwg= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -367,13 +358,10 @@ github.com/go-playground/validator/v10 v10.8.0 h1:1kAa0fCrnpv+QYdkdcRzrRM7AyYs5o github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redis/v7 v7.4.1 h1:PASvf36gyUpr2zdOUS/9Zqc80GbM+9BDyiJSJDDOrTI= github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= -github.com/go-redsync/redsync/v4 v4.5.1 h1:T97UCaY8MfQg/6kB7MTuimF4tnLOCdJbsvIoN5KmjZE= -github.com/go-redsync/redsync/v4 v4.5.1/go.mod h1:AfhgO1E6W3rlUTs6Zmz/B6qBZJFasV30lwo7nlizdDs= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig 
v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= @@ -423,7 +411,6 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -456,8 +443,6 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= -github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= -github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= @@ -506,8 +491,6 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= @@ -530,7 +513,6 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMW github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= @@ -538,7 +520,6 @@ github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-msgpack 
v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -862,8 +843,6 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM= -github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -928,7 +907,6 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1085,8 +1063,6 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1098,7 +1074,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1203,11 +1178,7 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1335,21 +1306,13 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.66.0 h1:CbGy4LEiXCVCiNEDFgGpWOVwsDT7E2Qej1ZvN1P7KPg= -google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1408,18 +1371,7 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211019152133-63b7e35f4404/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44 h1:0UVUC7VWA/mIU+5a4hVWH6xa234gLcRX8ZcrFKmWWKA= google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -1451,7 +1403,6 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= diff --git a/helper/redis.go b/helper/redis.go index b1032f0..7d334a9 100644 --- a/helper/redis.go +++ b/helper/redis.go @@ -69,7 +69,7 @@ func validateRedisPersistConfig(ctx context.Context, cli *redis.Client, conf *co if !isAppendOnlyEnabled { return errors.New("redis appendonly MUST be 'yes' to prevent data loss") } - if conf.EnableSecondaryStorage && maxMem == 0 { + if maxMem == 0 { return errors.New("redis 
maxmemory MUST be assigned when secondary storage is enabled") } return nil diff --git a/helper/redis_test.go b/helper/redis_test.go index 8c6bdc8..dcdbf69 100644 --- a/helper/redis_test.go +++ b/helper/redis_test.go @@ -21,7 +21,6 @@ func TestValidateRedisConfig(t *testing.T) { require.Nil(t, err) assert.NotNil(t, ValidateRedisConfig(ctx, &defaultPool)) _, err = redisCli.ConfigSet(ctx, "maxmemory-policy", "noeviction").Result() - defaultPool.EnableSecondaryStorage = true assert.NotNil(t, ValidateRedisConfig(ctx, &defaultPool)) _, err = redisCli.ConfigSet(ctx, "maxmemory", "10000000").Result() require.Nil(t, err) diff --git a/model/job.proto b/model/job.proto deleted file mode 100644 index 11b6e8e..0000000 --- a/model/job.proto +++ /dev/null @@ -1,8 +0,0 @@ -syntax = "proto3"; -package model; -option go_package="github.com/lmstfy/engine/model"; - -message JobData { - bytes data = 1; - map attributes = 2; -} \ No newline at end of file diff --git a/scripts/run-test.sh b/scripts/run-test.sh index 37bb4a8..e06c338 100755 --- a/scripts/run-test.sh +++ b/scripts/run-test.sh @@ -1,4 +1,3 @@ #!/bin/bash set -e -x -export SPANNER_EMULATOR_HOST=localhost:9010 go test $(go list ./... | grep -v client) -race -v -covermode=atomic -coverprofile=coverage.out -p 1 diff --git a/scripts/schemas/spanner/ddls.sql b/scripts/schemas/spanner/ddls.sql deleted file mode 100644 index b8f24f5..0000000 --- a/scripts/schemas/spanner/ddls.sql +++ /dev/null @@ -1,11 +0,0 @@ -CREATE TABLE lmstfy_jobs ( - pool_name STRING(1024), - job_id STRING(1024), - namespace STRING(1024), - queue STRING(1024), - body BYTES(MAX), - expired_time INT64 NOT NULL, - ready_time INT64 NOT NULL, - tries INT64 NOT NULL, - created_time INT64 NOT NULL -) PRIMARY KEY (job_id); \ No newline at end of file diff --git a/scripts/spanner/docker-compose.yml b/scripts/spanner/docker-compose.yml deleted file mode 100644 index 479deaa..0000000 --- a/scripts/spanner/docker-compose.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: '2' - -services: - - emulator: - image: gcr.io/cloud-spanner-emulator/emulator - ports: - - "9010:9010" - - "9020:9020" - - diff --git a/server/handlers/queue.go b/server/handlers/queue.go index a7f5f7e..7604b65 100644 --- a/server/handlers/queue.go +++ b/server/handlers/queue.go @@ -10,7 +10,6 @@ import ( "github.com/sirupsen/logrus" "github.com/bitleak/lmstfy/engine" - "github.com/bitleak/lmstfy/engine/redis_v2" ) const ( @@ -22,9 +21,9 @@ const ( // PUT /:namespace/:queue // @query: -// - delay: uint32 -// - ttl: uint32 -// - tries: uint16 +// - delay: uint32 +// - ttl: uint32 +// - tries: uint16 func Publish(c *gin.Context) { logger := GetHTTPLogger(c) e := c.MustGet("engine").(engine.Engine) @@ -86,24 +85,7 @@ func Publish(c *gin.Context) { c.JSON(http.StatusRequestEntityTooLarge, gin.H{"error": "body too large"}) return } - attributes := parseAttributes(c) - var job engine.Job - // check engine version - if _, ok := e.(*redis_v2.Engine); ok { - req := &engine.CreateJobReq{ - Namespace: namespace, - Queue: queue, - Body: body, - TTL: uint32(ttlSecond), - Delay: uint32(delaySecond), - Tries: uint16(tries), - Attributes: attributes, - } - job = engine.NewJobFromReq(req) - } else { - job = engine.NewJob(namespace, queue, body, uint32(ttlSecond), uint32(delaySecond), uint16(tries), "") - } - + job := engine.NewJob(namespace, queue, body, uint32(ttlSecond), uint32(delaySecond), uint16(tries), "") jobID, err = e.Publish(job) if err != nil { logger.WithFields(logrus.Fields{ @@ -131,9 +113,9 @@ func Publish(c *gin.Context) { // PUT 
/:namespace/:queue/bulk // @query: -// - delay: uint32 -// - ttl: uint32 -// - tries: uint16 +// - delay: uint32 +// - ttl: uint32 +// - tries: uint16 func PublishBulk(c *gin.Context) { logger := GetHTTPLogger(c) e := c.MustGet("engine").(engine.Engine) @@ -170,7 +152,6 @@ func PublishBulk(c *gin.Context) { c.JSON(http.StatusBadRequest, gin.H{"error": "tries shouldn't be zero"}) return } - attributes := parseAttributes(c) body, err := c.GetRawData() if err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "failed to read body"}) @@ -197,29 +178,9 @@ func PublishBulk(c *gin.Context) { } } - var isV2Engine bool - if _, ok := e.(*redis_v2.Engine); ok { - isV2Engine = true - } - jobIDs := make([]string, 0) for _, job := range jobs { - var j engine.Job - if isV2Engine { - req := &engine.CreateJobReq{ - Namespace: namespace, - Queue: queue, - Body: job, - TTL: uint32(ttlSecond), - Delay: uint32(delaySecond), - Tries: uint16(tries), - Attributes: attributes, - } - j = engine.NewJobFromReq(req) - } else { - j = engine.NewJob(namespace, queue, job, uint32(ttlSecond), uint32(delaySecond), uint16(tries), "") - } - + j := engine.NewJob(namespace, queue, job, uint32(ttlSecond), uint32(delaySecond), uint16(tries), "") jobID, err := e.Publish(j) if err != nil { logger.WithFields(logrus.Fields{ @@ -249,9 +210,10 @@ func PublishBulk(c *gin.Context) { // GET /:namespace/:queue[,:queue]* // @query: -// - ttr: uint32 -// - timeout: uint32 -// - count: uint32 +// - ttr: uint32 +// - timeout: uint32 +// - count: uint32 +// // NOTE: according to RFC3986, the URL path part can contain comma(",") , // so I decide to use "," as the separator of queue names func Consume(c *gin.Context) { diff --git a/server/handlers/queue_test.go b/server/handlers/queue_test.go index fd296b4..e12d5ba 100644 --- a/server/handlers/queue_test.go +++ b/server/handlers/queue_test.go @@ -3,7 +3,6 @@ package handlers_test import ( "bytes" "encoding/json" - "errors" "fmt" "math/rand" "net/http" @@ -18,8 +17,6 @@ import ( "github.com/bitleak/lmstfy/server/handlers" ) -const jobAttributeHeaderPrefix = "lmstfy-attribute-" - func TestPublish(t *testing.T) { query := url.Values{} query.Add("delay", "5") @@ -40,30 +37,6 @@ func TestPublish(t *testing.T) { } } -func TestPublishV2(t *testing.T) { - query := url.Values{} - query.Add("delay", "5") - query.Add("ttl", "10") - query.Add("tries", "1") - query.Add("token", "test-v2:1234567") - - targetUrl := fmt.Sprintf("http://localhost/api/ns/qs1?%s", query.Encode()) - body := strings.NewReader("hello msg") - req, err := http.NewRequest("PUT", targetUrl, body) - if err != nil { - t.Fatalf("Failed to create request") - } - req.Header[jobAttributeHeaderPrefix+"flag"] = []string{"1"} - req.Header[jobAttributeHeaderPrefix+"label"] = []string{"abc"} - c, e, resp := ginTest(req) - e.Use(handlers.ValidateParams, handlers.SetupQueueEngine) - e.PUT("/api/:namespace/:queue", handlers.Publish) - e.HandleContext(c) - if resp.Code != http.StatusCreated { - t.Fatal("Failed to publish") - } -} - func TestConsume(t *testing.T) { body, _ := publishTestJob("ns", "q2", 0, 0) @@ -102,51 +75,6 @@ func TestConsume(t *testing.T) { } } -func TestConsumeV2(t *testing.T) { - body := []byte("test v2") - - jobID := publishJobV2("ns", "qs2", 0, 0, body) - - query := url.Values{} - query.Add("ttr", "10") - query.Add("timeout", "2") - query.Add("token", "test-v2:1234567") - - targetUrl := fmt.Sprintf("http://localhost/api/ns/qs2?%s", query.Encode()) - req, err := http.NewRequest("GET", targetUrl, nil) - if err != nil { - 
t.Fatalf("Failed to create request") - } - c, e, resp := ginTest(req) - e.Use(handlers.ValidateParams, handlers.SetupQueueEngine) - e.GET("/api/:namespace/:queue", handlers.Consume) - e.HandleContext(c) - if resp.Code != http.StatusOK { - t.Fatal("Failed to consume") - } - var data struct { - Msg string - Namespace string - Queue string - JobID string `json:"job_id"` - Data []byte - Tries int `json:"remain_tries"` - TTL int `json:"ttl"` - Attributes map[string]string `json:"attributes"` - } - err = json.Unmarshal(resp.Body.Bytes(), &data) - if err != nil { - t.Fatalf("Failed to decode response: %s", err) - } - assert.Equal(t, data.TTL, 0) - assert.Equal(t, 0, data.Tries) - assert.Equal(t, jobID, data.JobID) - if !bytes.Equal(data.Data, body) { - t.Fatalf("Mismatched job data") - } - assert.Equal(t, checkRespAttributes(data.Attributes), nil) -} - func TestNoBlockingConsumeMulti(t *testing.T) { body, jobID := publishTestJob("ns", "q4", 0, 60) query := url.Values{} @@ -265,40 +193,6 @@ func TestPeekQueue(t *testing.T) { assert.Equal(t, jobID, data.JobID) } -func TestPeekQueueV2(t *testing.T) { - body := []byte("test v2") - jobID := publishJobV2("ns", "qs3", 0, 60, body) - - targetUrl := "http://localhost/api/ns/qs3/peek?token=test-v2:1234567" - req, err := http.NewRequest("GET", targetUrl, nil) - if err != nil { - t.Fatalf("Failed to create request") - } - c, e, resp := ginTest(req) - e.Use(handlers.ValidateParams, handlers.SetupQueueEngine) - e.GET("/api/:namespace/:queue/peek", handlers.PeekQueue) - e.HandleContext(c) - if resp.Code != http.StatusOK { - t.Log(resp.Body.String()) - t.Fatal("Failed to peek queue") - } - - var data struct { - Namespace string - Queue string - JobID string `json:"job_id"` - Data []byte - Attributes map[string]string `json:"attributes"` - } - err = json.Unmarshal(resp.Body.Bytes(), &data) - if err != nil { - t.Fatal("Failed to decode response") - } - assert.Equal(t, jobID, data.JobID) - assert.Equal(t, body, data.Data) - assert.Equal(t, checkRespAttributes(data.Attributes), nil) -} - func TestSize(t *testing.T) { publishTestJob("ns", "q8", 0, 60) @@ -364,40 +258,6 @@ func TestPeekJob(t *testing.T) { } } -func TestPeekJobV2(t *testing.T) { - body := []byte("test v2") - jobID := publishJobV2("ns", "qs4", 0, 60, body) - - targetUrl := fmt.Sprintf("http://localhost/api/ns/qs4/job/%s?token=test-v2:1234567", jobID) - req, err := http.NewRequest("GET", targetUrl, nil) - if err != nil { - t.Fatalf("Failed to create request") - } - c, e, resp := ginTest(req) - e.Use(handlers.ValidateParams, handlers.SetupQueueEngine) - e.GET("/api/:namespace/:queue/job/:job_id", handlers.PeekJob) - e.HandleContext(c) - if resp.Code != http.StatusOK { - t.Log(resp.Body.String()) - t.Fatal("Failed to peek job") - } - - var data struct { - Namespace string - Queue string - JobID string `json:"job_id"` - Data []byte - Attributes map[string]string `json:"attributes"` - } - err = json.Unmarshal(resp.Body.Bytes(), &data) - if err != nil { - t.Fatal("Failed to decode response") - } - assert.Equal(t, jobID, data.JobID) - assert.Equal(t, body, data.Data) - assert.Equal(t, checkRespAttributes(data.Attributes), nil) -} - func TestPeekDeadLetter(t *testing.T) { // Publish and consume a job without ACK(delete), so it will be move to deadletter ASAP _, jobID := publishTestJob("ns", "q10", 0, 60) @@ -683,70 +543,6 @@ func TestPublishBulk(t *testing.T) { } } -func TestPublishBulkV2(t *testing.T) { - query := url.Values{} - query.Add("delay", "0") - query.Add("ttl", "10") - query.Add("tries", "1") - 
query.Add("token", "test-v2:1234567") - - targetUrl := fmt.Sprintf("http://localhost/api/ns/qs5/bulk?%s", query.Encode()) - jobsData := []interface{}{ - "hello msg", - 123456, - struct { - Msg string `json:"msg"` - }{Msg: "success"}, - []string{"foo", "bar"}, - true, - } - bodyData, _ := json.Marshal(jobsData) - body := bytes.NewReader(bodyData) - req, err := http.NewRequest("PUT", targetUrl, body) - if err != nil { - t.Fatalf("Failed to create request") - } - req.Header[jobAttributeHeaderPrefix+"flag"] = []string{"1"} - req.Header[jobAttributeHeaderPrefix+"label"] = []string{"abc"} - c, e, resp := ginTest(req) - - e.Use(handlers.ValidateParams, handlers.SetupQueueEngine) - e.PUT("/api/:namespace/:queue/bulk", handlers.PublishBulk) - e.HandleContext(c) - if resp.Code != http.StatusCreated { - t.Fatal("Failed to publish") - } - var data struct { - Msg string - JobIDs []string `json:"job_ids"` - } - err = json.Unmarshal(resp.Body.Bytes(), &data) - if err != nil { - t.Fatalf("Failed to decode response: %s", err) - } - if len(data.JobIDs) != len(jobsData) { - t.Fatalf("Mismatched job count") - } - jobIDMap := map[string]int{} - for idx, jobID := range data.JobIDs { - jobIDMap[jobID] = idx - } - for i := 0; i < len(jobsData); i++ { - body, jobID, attributes := consumeTestJobV2("ns", "qs5", 0, 1) - idx, ok := jobIDMap[jobID] - if !ok { - t.Fatalf("Job not found") - } - jobData, _ := json.Marshal(jobsData[idx]) - if !bytes.Equal(body, jobData) { - t.Fatalf("Mismatched Job data") - } - if len(attributes) == 0 || attributes["flag"] != "1" || attributes["label"] != "abc" { - t.Fatalf("Mismatched Job attributes") - } - } -} - func publishTestJob(ns, q string, delay, ttl uint32) (body []byte, jobID string) { e := engine.GetEngine("") body = make([]byte, 10) @@ -766,39 +562,3 @@ func consumeTestJob(ns, q string, ttr, timeout uint32) (body []byte, jobID strin } return job.Body(), job.ID() } - -func publishJobV2(ns, q string, delay, ttl uint32, body []byte) string { - e := engine.GetEngine("test-v2") - attributes := make(map[string]string) - attributes["flag"] = "1" - attributes["label"] = "abc" - - j := engine.NewJobFromReq(&engine.CreateJobReq{ - Namespace: ns, - Queue: q, - ID: "", - Body: body, - TTL: ttl, - Delay: delay, - Tries: 1, - Attributes: attributes, - }) - jobID, _ := e.Publish(j) - return jobID -} - -func consumeTestJobV2(ns, q string, ttr, timeout uint32) (body []byte, jobID string, attributes map[string]string) { - e := engine.GetEngine("test-v2") - job, _ := e.Consume(ns, []string{q}, ttr, timeout) - if job == nil { - return nil, "", nil - } - return job.Body(), job.ID(), job.Attributes() -} - -func checkRespAttributes(attributes map[string]string) error { - if len(attributes) == 0 || attributes["flag"] != "1" || attributes["label"] != "abc" { - return errors.New("Mismatched job attributes") - } - return nil -} diff --git a/server/handlers/setup_test.go b/server/handlers/setup_test.go index d08b0f7..ed40bd5 100644 --- a/server/handlers/setup_test.go +++ b/server/handlers/setup_test.go @@ -12,7 +12,6 @@ import ( "github.com/bitleak/lmstfy/config" "github.com/bitleak/lmstfy/engine" redis_engine "github.com/bitleak/lmstfy/engine/redis" - "github.com/bitleak/lmstfy/engine/redis_v2" "github.com/bitleak/lmstfy/helper" "github.com/bitleak/lmstfy/server/handlers" "github.com/bitleak/lmstfy/throttler" @@ -60,9 +59,6 @@ func setup(Conf *config.Config) { if err := redis_engine.Setup(Conf); err != nil { panic(fmt.Sprintf("Failed to setup redis engine: %s", err)) } - if err := redis_v2.Setup(Conf); 
err != nil { - panic(fmt.Sprintf("Failed to setup redis v2 engine: %s", err)) - } if engine.GetEngine(config.DefaultPoolName) == nil { panic("missing default pool") } @@ -95,7 +91,6 @@ func runAllTests(m *testing.M, version string) { func TestMain(m *testing.M) { logger := logrus.New() redis_engine.SetLogger(logger) - redis_v2.SetLogger(logger) runAllTests(m, "") } diff --git a/server/main.go b/server/main.go index 42be76c..97e119b 100644 --- a/server/main.go +++ b/server/main.go @@ -20,12 +20,10 @@ import ( "github.com/bitleak/lmstfy/engine" "github.com/bitleak/lmstfy/engine/migration" redis_engine "github.com/bitleak/lmstfy/engine/redis" - "github.com/bitleak/lmstfy/engine/redis_v2" "github.com/bitleak/lmstfy/helper" "github.com/bitleak/lmstfy/log" "github.com/bitleak/lmstfy/server/handlers" "github.com/bitleak/lmstfy/server/middleware" - "github.com/bitleak/lmstfy/storage" "github.com/bitleak/lmstfy/throttler" "github.com/bitleak/lmstfy/version" ) @@ -167,10 +165,6 @@ func setupEngines(conf *config.Config, l *logrus.Logger) error { if err := redis_engine.Setup(conf); err != nil { return fmt.Errorf("%w in redis engine", err) } - redis_v2.SetLogger(l) - if err := redis_v2.Setup(conf); err != nil { - return fmt.Errorf("%w in redis v2 engine", err) - } migration.SetLogger(l) if err := migration.Setup(conf); err != nil { return fmt.Errorf("%w in migration engine", err) @@ -207,12 +201,6 @@ func main() { registerSignal(shutdown, func() { log.ReopenLogs(conf.LogDir) }) - // set up data manager - if conf.HasSecondaryStorage() { - if err := storage.Init(conf); err != nil { - panic(fmt.Sprintf("Failed to init data manager for secondary storage: %s", err)) - } - } if err := setupEngines(conf, logger); err != nil { panic(fmt.Sprintf("Failed to setup engines, err: %s", err.Error())) } @@ -233,7 +221,6 @@ func main() { adminSrv.Close() // Admin server does not need to be stopped gracefully apiSrv.Shutdown(context.Background()) - storage.Get().Shutdown() throttler.GetThrottler().Shutdown() logger.Infof("[%d] Bye bye", os.Getpid()) } diff --git a/storage/lock/redis.go b/storage/lock/redis.go deleted file mode 100644 index 120a4ea..0000000 --- a/storage/lock/redis.go +++ /dev/null @@ -1,57 +0,0 @@ -package lock - -import ( - "fmt" - "time" - - "github.com/go-redis/redis/v8" - "github.com/go-redsync/redsync/v4" - "github.com/go-redsync/redsync/v4/redis/goredis/v8" -) - -type Lock interface { - Name() string - Acquire() error - Expiry() time.Duration - ExtendLease() (bool, error) - Release() (bool, error) -} - -type RedisLock struct { - name string - redisCli *redis.Client - mu *redsync.Mutex - expiry time.Duration -} - -func NewRedisLock(redisCli *redis.Client, name string, expiry time.Duration) *RedisLock { - pool := goredis.NewPool(redisCli) - rs := redsync.New(pool) - mu := rs.NewMutex(fmt.Sprintf("pumper-%s.lock", name), redsync.WithExpiry(expiry)) - return &RedisLock{ - name: name, - redisCli: redisCli, - expiry: expiry, - mu: mu, - } -} - -func (l *RedisLock) Name() string { - return l.name -} - -func (l *RedisLock) Acquire() error { - return l.mu.Lock() -} - -func (l *RedisLock) Expiry() time.Duration { - return l.expiry -} - -func (l *RedisLock) ExtendLease() (bool, error) { - return l.mu.Extend() -} - -func (l *RedisLock) Release() (bool, error) { - return l.mu.Unlock() -} diff --git a/storage/manager.go b/storage/manager.go deleted file mode 100644 index a0f34d9..0000000 --- a/storage/manager.go +++ /dev/null @@ -1,214 +0,0 @@ -package storage - -import ( - "context" - "errors" - "fmt" - 
"strconv" - "strings" - "sync" - "time" - - "github.com/go-redis/redis/v8" - "github.com/sirupsen/logrus" - - "github.com/bitleak/lmstfy/config" - "github.com/bitleak/lmstfy/engine" - "github.com/bitleak/lmstfy/helper" - "github.com/bitleak/lmstfy/log" - "github.com/bitleak/lmstfy/storage/lock" - "github.com/bitleak/lmstfy/storage/persistence/model" - "github.com/bitleak/lmstfy/storage/persistence/spanner" - "github.com/bitleak/lmstfy/storage/pumper" -) - -const ( - defaultMaxJobPumpBatchSize = 512 - - defaultLockExpiry = 15 * time.Second - defaultPumpInterval = 3 * time.Second - - addJobSuccessStatus = "success" - addJobFailedStatus = "failed" -) - -type Manager struct { - cfg *config.Config - wg sync.WaitGroup - mu sync.Mutex - pools map[string]engine.Engine - pumpers map[string]pumper.Pumper - - redisCli *redis.Client - storage Persistence - maxPumpBatchSize int64 -} - -var manager *Manager - -func Init(cfg *config.Config) (err error) { - manager, err = NewManger(cfg) - return err -} - -func Get() *Manager { - return manager -} - -func createPersistStorage(cfg *config.SecondaryStorage) (Persistence, error) { - if cfg.Spanner != nil { - return spanner.NewSpanner(cfg.Spanner) - } - return nil, errors.New("require at least one of [Spanner]") -} - -func NewManger(cfg *config.Config) (*Manager, error) { - if cfg.SecondaryStorage == nil { - return nil, errors.New("nil second storage config") - } - storage, err := createPersistStorage(cfg.SecondaryStorage) - if err != nil { - return nil, err - } - redisCli := helper.NewRedisClient(&cfg.AdminRedis, nil) - if redisCli.Ping(context.Background()).Err() != nil { - return nil, fmt.Errorf("create redis client err: %w", err) - } - return &Manager{ - cfg: cfg, - redisCli: redisCli, - storage: storage, - pools: make(map[string]engine.Engine), - pumpers: make(map[string]pumper.Pumper), - maxPumpBatchSize: cfg.SecondaryStorage.MaxJobPumpBatchSize, - }, nil -} - -func (m *Manager) PumpFn(name string, pool engine.Engine, threshold int64) func() bool { - return func() bool { - logger := log.Get().WithField("pool", name) - if isHighRedisMemUsage(m.redisCli, m.cfg.SecondaryStorage.HighRedisMemoryWatermark) { - logger.Error("High redis usage, storage stops pumping data") - return false - } - - if m.maxPumpBatchSize == 0 || m.maxPumpBatchSize > defaultMaxJobPumpBatchSize { - m.maxPumpBatchSize = defaultMaxJobPumpBatchSize - } - now := time.Now() - req := &model.DBJobReq{ - PoolName: name, - ReadyTime: now.Unix() + threshold, - Count: m.maxPumpBatchSize, - } - ctx := context.TODO() - jobs, err := m.storage.GetReadyJobs(ctx, req) - if err != nil { - logger.WithError(err).Errorf("Failed to get ready jobs from storage") - return false - } - - if len(jobs) == 0 { - return false - } - logger.Debugf("Got %d ready jobs from storage", len(jobs)) - - jobsID := make([]string, 0) - for _, job := range jobs { - _, err = pool.Publish(job) - if err != nil && !errors.Is(err, engine.ErrJobExisted) { - logger.WithFields(logrus.Fields{ - "job": job, - "err": err, - }).Errorf("Failed to publish job") - continue - } - jobsID = append(jobsID, job.ID()) - } - - if _, err = m.storage.DelJobs(ctx, jobsID); err != nil { - logger.WithFields(logrus.Fields{ - "jobs": jobsID, - "err": err, - }).Errorf("Failed to delete jobs from storage") - return false - } - metrics.storageDelJobs.WithLabelValues(name).Add(float64(len(jobsID))) - return int64(len(jobsID)) == m.maxPumpBatchSize - } -} - -func (m *Manager) AddPool(name string, pool engine.Engine, threshold int64) { - m.mu.Lock() - defer 
m.mu.Unlock() - - redisLock := lock.NewRedisLock(m.redisCli, name, defaultLockExpiry) - pumper := pumper.NewDefault(redisLock, defaultPumpInterval) - m.pumpers[name] = pumper - - m.wg.Add(1) - go func() { - defer m.wg.Done() - pumper.Run(m.PumpFn(name, pool, threshold)) - }() -} - -func (m *Manager) AddJob(ctx context.Context, poolName string, job engine.Job) error { - var status string - err := m.storage.BatchAddJobs(ctx, poolName, []engine.Job{job}) - if err == nil { - status = addJobSuccessStatus - } else { - status = addJobFailedStatus - } - metrics.storageAddJobs.WithLabelValues(poolName, job.Namespace(), job.Queue(), status).Inc() - return err -} - -func (m *Manager) GetJobByID(ctx context.Context, ID string) ([]engine.Job, error) { - return m.storage.BatchGetJobsByID(ctx, []string{ID}) -} - -func (m *Manager) Shutdown() { - if m == nil { - return - } - - for _, pumper := range m.pumpers { - pumper.Shutdown() - } - m.wg.Wait() - - _ = m.redisCli.Close() - m.storage.Close() -} - -func isHighRedisMemUsage(cli *redis.Client, redisMemoryUsageWatermark float64) bool { - if redisMemoryUsageWatermark == 0 || redisMemoryUsageWatermark >= 1 { - return false - } - memoryInfo, err := cli.Info(context.TODO(), "memory").Result() - if err != nil { - return false - } - var usedMem, maxMem int64 - lines := strings.Split(memoryInfo, "\r\n") - for _, line := range lines { - fields := strings.Split(line, ":") - if len(fields) != 2 { - continue - } - switch fields[0] { - case "used_memory": - usedMem, _ = strconv.ParseInt(fields[1], 10, 64) - case "maxmemory": - maxMem, _ = strconv.ParseInt(fields[1], 10, 64) - default: - continue - } - } - if maxMem == 0 { - return false - } - return float64(usedMem)/float64(maxMem) > redisMemoryUsageWatermark -} diff --git a/storage/metrics.go b/storage/metrics.go deleted file mode 100644 index 8baf0ab..0000000 --- a/storage/metrics.go +++ /dev/null @@ -1,44 +0,0 @@ -package storage - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -// Metrics contains storage related metrics -type Metrics struct { - storageAddJobs *prometheus.CounterVec - storageDelJobs *prometheus.CounterVec -} - -var ( - metrics *Metrics -) - -const ( - Namespace = "infra" - Subsystem = "lmstfy_v2_storage" -) - -func setupMetrics() { - cv := newCounterVecHelper - metrics = &Metrics{ - storageAddJobs: cv("storage_add_jobs", "namespace", "queue", "status"), - storageDelJobs: cv("storage_del_jobs"), - } -} - -func newCounterVecHelper(name string, labels ...string) *prometheus.CounterVec { - labels = append([]string{"pool"}, labels...) 
// all metrics has this common field `pool` - opts := prometheus.CounterOpts{} - opts.Namespace = Namespace - opts.Subsystem = Subsystem - opts.Name = name - opts.Help = name - counters := prometheus.NewCounterVec(opts, labels) - prometheus.MustRegister(counters) - return counters -} - -func init() { - setupMetrics() -} diff --git a/storage/persistence/model/job.go b/storage/persistence/model/job.go deleted file mode 100644 index 67aa921..0000000 --- a/storage/persistence/model/job.go +++ /dev/null @@ -1,40 +0,0 @@ -package model - -type DBJob struct { - PoolName string `spanner:"pool_name" json:"pool_name"` - JobID string `spanner:"job_id" json:"job_id"` - Namespace string `spanner:"namespace" json:"namespace"` - Queue string `spanner:"queue" json:"queue"` - Body []byte `spanner:"body" json:"body"` - ExpiredTime int64 `spanner:"expired_time" json:"expired_time"` - ReadyTime int64 `spanner:"ready_time" json:"ready_time"` - Tries int64 `spanner:"tries" json:"tries"` - CreatedTime int64 `spanner:"created_time" json:"created_time"` -} - -func (j *DBJob) TTL(now int64) uint32 { - if j.ExpiredTime == 0 { - return 0 - } - if j.ExpiredTime <= now { - return 1 - } else { - return uint32(j.ExpiredTime - now) - } -} - -func (j *DBJob) Delay(now int64) uint32 { - if j.ReadyTime <= now { - return 0 - } else { - return uint32(j.ReadyTime - now) - } -} - -type DBJobReq struct { - PoolName string - Namespace string - Queue string - ReadyTime int64 - Count int64 -} diff --git a/storage/persistence/spanner/setup_test.go b/storage/persistence/spanner/setup_test.go deleted file mode 100644 index 4f09256..0000000 --- a/storage/persistence/spanner/setup_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package spanner - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - - "cloud.google.com/go/spanner" - database "cloud.google.com/go/spanner/admin/database/apiv1" - instance "cloud.google.com/go/spanner/admin/instance/apiv1" - databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" - instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" - "google.golang.org/grpc/codes" - - "github.com/bitleak/lmstfy/config" -) - -func CreateInstance(ctx context.Context, cfg *config.SpannerConfig) error { - instanceName := fmt.Sprintf("projects/%s/instances/%s", cfg.Project, cfg.Instance) - - instanceAdminClient, err := instance.NewInstanceAdminClient(ctx) - if err != nil { - return err - } - defer instanceAdminClient.Close() - - _, err = instanceAdminClient.GetInstance(ctx, &instancepb.GetInstanceRequest{ - Name: instanceName, - }) - if err != nil && spanner.ErrCode(err) != codes.NotFound { - return err - } - if err == nil { - return nil - } - - _, err = instanceAdminClient.CreateInstance(ctx, &instancepb.CreateInstanceRequest{ - Parent: "projects/" + cfg.Project, - InstanceId: cfg.Instance, - }) - return err -} - -func CreateDatabase(ctx context.Context, cfg *config.SpannerConfig) error { - databaseAdminClient, err := database.NewDatabaseAdminClient(ctx) - if err != nil { - return err - } - instanceName := fmt.Sprintf("projects/%s/instances/%s", cfg.Project, cfg.Instance) - dbName := fmt.Sprintf("%s/databases/%s", instanceName, cfg.Database) - _, err = databaseAdminClient.GetDatabase(ctx, &databasepb.GetDatabaseRequest{Name: dbName}) - if err != nil && spanner.ErrCode(err) != codes.NotFound { - return err - } - if err == nil { - // db exists - return nil - } - - ddlBytes, err := os.ReadFile("../../../scripts/schemas/spanner/ddls.sql") - if err != nil { - return fmt.Errorf("read ddls 
file: %w", err) - } - ddls := make([]string, 0) - for _, ddl := range strings.Split(string(ddlBytes), ";") { - ddl = strings.TrimSpace(ddl) - if len(ddl) != 0 { - ddls = append(ddls, ddl) - } - } - op, err := databaseAdminClient.CreateDatabase(ctx, &databasepb.CreateDatabaseRequest{ - Parent: instanceName, - CreateStatement: "CREATE DATABASE `" + cfg.Database + "`", - ExtraStatements: ddls, - }) - if err != nil { - return err - } - _, err = op.Wait(ctx) - return err -} - -func TestMain(m *testing.M) { - if os.Getenv("SPANNER_EMULATOR_HOST") == "" { - panic("SPANNER_EMULATOR_HOST is not set") - } - if err := CreateInstance(context.Background(), config.SpannerEmulator); err != nil { - panic("Create instance: " + err.Error()) - } - if err := CreateDatabase(context.Background(), config.SpannerEmulator); err != nil { - panic("Create database: " + err.Error()) - } - os.Exit(m.Run()) -} diff --git a/storage/persistence/spanner/spanner.go b/storage/persistence/spanner/spanner.go deleted file mode 100644 index 81a8c91..0000000 --- a/storage/persistence/spanner/spanner.go +++ /dev/null @@ -1,211 +0,0 @@ -package spanner - -import ( - "context" - "errors" - "fmt" - "time" - - "cloud.google.com/go/spanner" - "github.com/go-redis/redis/v8" - "google.golang.org/api/option" - - "github.com/bitleak/lmstfy/config" - "github.com/bitleak/lmstfy/engine" - "github.com/bitleak/lmstfy/storage/persistence/model" -) - -const ( - MaxJobBatchSize = 1000 -) - -type Spanner struct { - cli *spanner.Client - redisClient *redis.Client - tableName string -} - -func createSpannerClient(cfg *config.SpannerConfig) (*spanner.Client, error) { - db := fmt.Sprintf("projects/%s/instances/%s/databases/%s", cfg.Project, cfg.Instance, cfg.Database) - if cfg.CredentialsFile != "" { - opt := option.WithCredentialsFile(cfg.CredentialsFile) - return spanner.NewClient(context.Background(), db, opt) - } - return spanner.NewClient(context.Background(), db) -} - -func NewSpanner(cfg *config.SpannerConfig) (*Spanner, error) { - client, err := createSpannerClient(cfg) - if err != nil { - return nil, err - } - return &Spanner{ - cli: client, - tableName: cfg.TableName, - }, nil -} - -// BatchAddJobs write jobs data into secondary storage -func (s *Spanner) BatchAddJobs(ctx context.Context, poolName string, jobs []engine.Job) (err error) { - err = validateReq(jobs) - if err != nil { - return err - } - now := time.Now().Unix() - dbJobs := make([]*model.DBJob, 0) - for _, job := range jobs { - expiredTime := int64(0) - if job.TTL() > 0 { - expiredTime = now + int64(job.TTL()) - } - j := &model.DBJob{ - PoolName: poolName, - JobID: job.ID(), - Namespace: job.Namespace(), - Queue: job.Queue(), - Body: job.Body(), - ExpiredTime: expiredTime, - ReadyTime: now + int64(job.Delay()), - Tries: int64(job.Tries()), - CreatedTime: now, - } - dbJobs = append(dbJobs, j) - } - - _, err = s.cli.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { - mutations := make([]*spanner.Mutation, 0) - for _, job := range dbJobs { - mut, err := spanner.InsertStruct(s.tableName, job) - if err != nil { - return err - } - mutations = append(mutations, mut) - } - return txn.BufferWrite(mutations) - }) - return err -} - -// GetQueueSize returns the size of data in storage which are due before certain due time -func (s *Spanner) GetQueueSize(ctx context.Context, req []*model.DBJobReq) (count map[string]int64, err error) { - txn := s.cli.ReadOnlyTransaction() - defer txn.Close() - count = make(map[string]int64) - - for _, r := range req 
{ - var tmpCount int64 - key := fmt.Sprintf("%s/%s", r.Namespace, r.Queue) - readRow := func(r *spanner.Row) error { return r.ColumnByName("tmpCount", &tmpCount) } - iter := txn.Query(ctx, spanner.Statement{ - SQL: "SELECT COUNT(*) AS tmpCount FROM lmstfy_jobs WHERE namespace = @namespace and queue = @queue and " + - "ready_time >= @readytime LIMIT 1", - Params: map[string]interface{}{ - "namespace": r.Namespace, - "queue": r.Queue, - "readytime": r.ReadyTime, - }, - }) - err = iter.Do(readRow) - if err != nil { - continue - } - count[key] = tmpCount - } - return count, nil -} - -// DelJobs remove job data from storage based on job id -func (s *Spanner) DelJobs(ctx context.Context, jobIDs []string) (count int64, err error) { - if len(jobIDs) == 0 { - return 0, nil - } - _, err = s.cli.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { - count, err = txn.Update(ctx, spanner.Statement{ - SQL: "DELETE FROM lmstfy_jobs WHERE job_id IN UNNEST(@ids)", - Params: map[string]interface{}{ - "ids": jobIDs, - }, - }) - return err - }) - return count, err -} - -// GetReadyJobs return jobs which are ready based on input ready time from data storage -func (s *Spanner) GetReadyJobs(ctx context.Context, req *model.DBJobReq) (jobs []engine.Job, err error) { - if req.ReadyTime <= 0 { - return nil, fmt.Errorf("GetReadyJobs failed: missing readytime parameter") - } - now := time.Now().Unix() - txn := s.cli.ReadOnlyTransaction() - defer txn.Close() - iter := txn.Query(ctx, spanner.Statement{ - SQL: "SELECT pool_name, job_id, namespace, queue, body, ready_time, expired_time, created_time, tries " + - "FROM lmstfy_jobs WHERE pool_name = @poolname and ready_time <= @readytime LIMIT @limit", - Params: map[string]interface{}{ - "poolname": req.PoolName, - "readytime": req.ReadyTime, - "limit": req.Count, - }, - }) - - err = iter.Do(func(row *spanner.Row) error { - dbJob := &model.DBJob{} - if err = row.ToStruct(dbJob); err != nil { - return err - } - j := engine.NewJob(dbJob.Namespace, dbJob.Queue, dbJob.Body, dbJob.TTL(now), - dbJob.Delay(now), uint16(dbJob.Tries), dbJob.JobID) - jobs = append(jobs, j) - return nil - }) - if err != nil { - return nil, err - } - return jobs, nil -} - -// BatchGetJobsByID returns job data by job ID -func (s *Spanner) BatchGetJobsByID(ctx context.Context, IDs []string) (jobs []engine.Job, err error) { - txn := s.cli.ReadOnlyTransaction() - now := time.Now().Unix() - defer txn.Close() - - iter := txn.Query(ctx, spanner.Statement{ - SQL: "SELECT pool_name, job_id, namespace, queue, body, ready_time, expired_time, created_time, tries " + - "FROM lmstfy_jobs WHERE job_id IN UNNEST(@ids) LIMIT @limit", - Params: map[string]interface{}{ - "ids": IDs, - "limit": len(IDs), - }, - }) - err = iter.Do(func(row *spanner.Row) error { - dbJob := &model.DBJob{} - if err = row.ToStruct(dbJob); err != nil { - return err - } - j := engine.NewJob(dbJob.Namespace, dbJob.Queue, dbJob.Body, dbJob.TTL(now), - dbJob.Delay(now), uint16(dbJob.Tries), dbJob.JobID) - jobs = append(jobs, j) - return nil - }) - if err != nil { - return nil, err - } - - return jobs, nil -} - -func validateReq(req []engine.Job) error { - if len(req) == 0 { - return errors.New("invalid req, null jobs list") - } - if len(req) > MaxJobBatchSize { - return errors.New("invalid req, exceed maximum input batch size") - } - return nil -} - -func (s *Spanner) Close() { - s.cli.Close() -} diff --git a/storage/persistence/spanner/spanner_test.go b/storage/persistence/spanner/spanner_test.go deleted 
file mode 100644 index c1b5c24..0000000 --- a/storage/persistence/spanner/spanner_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package spanner - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/bitleak/lmstfy/config" - "github.com/bitleak/lmstfy/engine" - "github.com/bitleak/lmstfy/storage/persistence/model" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - poolName = "test_pool" - namespace = "test_ns" -) - -func TestSpanner_Basic(t *testing.T) { - ctx := context.Background() - mgr, err := NewSpanner(config.SpannerEmulator) - require.NoError(t, err) - - jobCnt := int64(10) - jobIDs := make([]string, jobCnt) - createJobs := make([]engine.Job, jobCnt) - for i := int64(0); i < jobCnt; i++ { - queue := "q1" - if i%2 == 0 { - queue = "q2" - } - createJobs[i] = engine.NewJob(namespace, queue, []byte("hello"), 10, 4, 3, "") - jobIDs[i] = createJobs[i].ID() - } - require.NoError(t, mgr.BatchAddJobs(ctx, poolName, createJobs)) - - validateJob := func(t *testing.T, job engine.Job) { - assert.NotEmpty(t, job.ID()) - assert.EqualValues(t, job.Namespace(), namespace) - assert.EqualValues(t, job.Tries(), 3) - assert.GreaterOrEqual(t, job.Delay(), uint32(1)) - assert.LessOrEqual(t, job.Delay(), uint32(4)) - assert.GreaterOrEqual(t, job.TTL(), uint32(1)) - assert.LessOrEqual(t, job.TTL(), uint32(10)) - } - - t.Run("Batch Get Jobs By ID", func(t *testing.T) { - jobs, err := mgr.BatchGetJobsByID(ctx, jobIDs) - assert.Nil(t, err) - assert.EqualValues(t, len(jobIDs), len(jobs)) - for _, job := range jobs { - validateJob(t, job) - } - }) - - t.Run("Get Ready Jobs", func(t *testing.T) { - readyJobs, err := mgr.GetReadyJobs(ctx, &model.DBJobReq{ - PoolName: poolName, - ReadyTime: time.Now().Unix() + 10, - Count: jobCnt, - }) - require.NoError(t, err) - require.EqualValues(t, jobCnt, len(readyJobs)) - for _, job := range readyJobs { - validateJob(t, job) - } - }) - - t.Run("Get Queue Size", func(t *testing.T) { - queueSizes, err := mgr.GetQueueSize(ctx, []*model.DBJobReq{ - {PoolName: poolName, Namespace: namespace, Queue: "q1", ReadyTime: time.Now().Unix() - 10, Count: jobCnt}, - {PoolName: poolName, Namespace: namespace, Queue: "q2", ReadyTime: time.Now().Unix() - 10, Count: jobCnt}, - }) - require.NoError(t, err) - assert.EqualValues(t, jobCnt/2, queueSizes[fmt.Sprintf("%s/%s", namespace, "q1")]) - assert.EqualValues(t, jobCnt/2, queueSizes[fmt.Sprintf("%s/%s", namespace, "q2")]) - }) - - t.Run("Del Jobs", func(t *testing.T) { - count, err := mgr.DelJobs(context.Background(), jobIDs) - require.NoError(t, err) - require.EqualValues(t, jobCnt, count) - }) -} - -func TestSpanner_NoExpiredJob(t *testing.T) { - ctx := context.Background() - mgr, err := NewSpanner(config.SpannerEmulator) - require.NoError(t, err) - - jobCnt := int64(10) - jobIDs := make([]string, jobCnt) - createJobs := make([]engine.Job, jobCnt) - for i := int64(0); i < jobCnt; i++ { - queue := "q3" - createJobs[i] = engine.NewJob(namespace, queue, []byte("hello"), 0, 4, 3, "") - jobIDs[i] = createJobs[i].ID() - } - require.NoError(t, mgr.BatchAddJobs(ctx, poolName, createJobs)) - - jobs, err := mgr.BatchGetJobsByID(ctx, jobIDs) - assert.Nil(t, err) - assert.EqualValues(t, len(jobIDs), len(jobs)) - for _, job := range jobs { - assert.EqualValues(t, job.TTL(), 0) - } -} diff --git a/storage/persitence.go b/storage/persitence.go deleted file mode 100644 index d9ab017..0000000 --- a/storage/persitence.go +++ /dev/null @@ -1,23 +0,0 @@ -package storage - -import ( - "context" - - 
"github.com/bitleak/lmstfy/engine" - "github.com/bitleak/lmstfy/storage/persistence/model" -) - -// Persistence handles requests related to secondary storage -type Persistence interface { - // BatchAddJobs write jobs data into secondary storage - BatchAddJobs(ctx context.Context, poolName string, jobs []engine.Job) (err error) - // GetQueueSize returns the size of data in storage which are due before certain due time - GetQueueSize(ctx context.Context, req []*model.DBJobReq) (count map[string]int64, err error) - // DelJobs remove job data from storage based on job id - DelJobs(ctx context.Context, jobIDs []string) (count int64, err error) - // GetReadyJobs return jobs which are ready based on input ready time from data storage - GetReadyJobs(ctx context.Context, req *model.DBJobReq) (jobs []engine.Job, err error) - // BatchGetJobsByID returns job data by job ID - BatchGetJobsByID(ctx context.Context, IDs []string) (jobs []engine.Job, err error) - Close() -} diff --git a/storage/pumper/pumper.go b/storage/pumper/pumper.go deleted file mode 100644 index aa531e7..0000000 --- a/storage/pumper/pumper.go +++ /dev/null @@ -1,113 +0,0 @@ -package pumper - -import ( - "sync" - "time" - - "github.com/bitleak/lmstfy/log" - redislock "github.com/bitleak/lmstfy/storage/lock" - "go.uber.org/atomic" -) - -type Pumper interface { - Run(fn func() bool) - Shutdown() -} - -type Default struct { - isLeader atomic.Bool - lock redislock.Lock - interval time.Duration - - wg sync.WaitGroup - shutdown chan struct{} -} - -func NewDefault(lock redislock.Lock, interval time.Duration) *Default { - p := &Default{ - lock: lock, - interval: interval, - shutdown: make(chan struct{}), - } - p.isLeader.Store(false) - - logger := log.Get().WithField("lock_name", p.lock.Name()) - if err := p.lock.Acquire(); err == nil { - p.isLeader.Store(true) - logger.Info("Acquired the pumper lock, I'm leader now") - } else { - logger.WithError(err).Info("Lost the pumper lock") - } - - p.wg.Add(1) - go func() { - defer p.wg.Done() - p.keepalive() - }() - return p -} - -func (p *Default) keepalive() { - logger := log.Get().WithField("lock_name", p.lock.Name()) - - keepAliveTicker := time.NewTicker(p.lock.Expiry() / 3) - for { - select { - case <-keepAliveTicker.C: - if p.isLeader.Load() { - extendLeaseOK, err := p.lock.ExtendLease() - if !extendLeaseOK || err != nil { - p.isLeader.Store(false) - logger.WithError(err).Error("Failed to extend lease") - } - } else { - if err := p.lock.Acquire(); err == nil { - // Become Leader - p.isLeader.Store(true) - logger.Info("Become leader now") - continue - } - } - case <-p.shutdown: - logger.Info("Keep alive will be exited because the pumper was shutdown") - return - } - } -} - -func (p *Default) Run(fn func() bool) { - logger := log.Get().WithField("lock_name", p.lock.Name()) - - defer func() { - if p.isLeader.Load() { - if _, err := p.lock.Release(); err != nil { - logger.WithError(err).Error("Failed to release the pumper lock") - } - } - }() - - pumpTicker := time.NewTicker(p.interval) - for { - select { - case <-pumpTicker.C: - continueLoop := true - for p.isLeader.Load() && continueLoop { - select { - case <-p.shutdown: - logger.Info("The pumper was shutdown, will release the pumper lock") - return - default: - } - continueLoop = fn() - } - case <-p.shutdown: - logger.Info("The pumper was shutdown, will release the pumper lock") - return - } - } -} - -func (p *Default) Shutdown() { - close(p.shutdown) - p.wg.Wait() -} diff --git a/storage/pumper/pumper_test.go b/storage/pumper/pumper_test.go 
deleted file mode 100644 index b4114c9..0000000 --- a/storage/pumper/pumper_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package pumper - -import ( - "testing" - "time" - - "github.com/bitleak/lmstfy/log" - "github.com/bitleak/lmstfy/storage/lock" - goredis "github.com/go-redis/redis/v8" - "github.com/orlangure/gnomock" - "github.com/orlangure/gnomock/preset/redis" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" -) - -func init() { - log.Setup("json", "", "INFO", "ERROR") -} - -func TestNewDefault_FailOver(t *testing.T) { - mockRedis, err := gnomock.Start(redis.Preset()) - require.Empty(t, err) - defer gnomock.Stop(mockRedis) - redisCli := goredis.NewClient(&goredis.Options{ - Addr: mockRedis.DefaultAddress(), - }) - interval := time.Second - lock := lock.NewRedisLock(redisCli, "test-fail-over", interval) - p1 := NewDefault(lock, interval/2) - p2 := NewDefault(lock, interval/2) - - var counter atomic.Int32 - go p1.Run(func() bool { - counter.Add(1) - return false - }) - go p2.Run(func() bool { - counter.Add(1) - return false - }) - time.Sleep(2 * time.Second) - assert.GreaterOrEqual(t, counter.Load(), int32(2)) - assert.LessOrEqual(t, counter.Load(), int32(5)) - p1.Shutdown() - time.Sleep(2 * time.Second) - assert.LessOrEqual(t, counter.Load(), int32(8)) - p2.Shutdown() -}
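
For reference, after this patch both Publish and PublishBulk construct jobs through the single engine.NewJob constructor shown in the server/handlers/queue.go hunks above; with the redis_v2/secondary-storage branch removed, that is the only publish path regardless of the configured pool. Below is a minimal sketch of a caller on that path. It assumes only the API visible in this diff (engine.GetEngine, engine.NewJob taking an empty job ID to have one generated, and Engine.Publish returning the new job ID) and that the engines have already been set up the way server/main.go does; it is an illustration, not part of the patch.

    package main

    import (
        "fmt"

        "github.com/bitleak/lmstfy/engine"
    )

    // publishHello enqueues a small job on the default pool, mirroring the
    // NewJob/Publish calls in server/handlers/queue.go after this change.
    func publishHello() (string, error) {
        // "" selects the default pool; requires the redis engine to have been
        // set up beforehand (see setupEngines in server/main.go).
        e := engine.GetEngine("")
        job := engine.NewJob("ns", "q1", []byte("hello msg"),
            10, // ttl seconds
            5,  // delay seconds
            1,  // tries
            "") // job ID: empty string lets NewJob generate one
        return e.Publish(job)
    }

    func main() {
        jobID, err := publishHello()
        if err != nil {
            fmt.Println("publish failed:", err)
            return
        }
        fmt.Println("published job:", jobID)
    }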