diff --git a/Makefile b/Makefile index 16f8636003..09936e02db 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ NAME = "github.com/odpf/optimus" LAST_COMMIT := $(shell git rev-parse --short HEAD) LAST_TAG := "$(shell git rev-list --tags --max-count=1)" OPMS_VERSION := "$(shell git describe --tags ${LAST_TAG})-next" -PROTON_COMMIT := "fae8287656b163ae07a7f03edd3ea3f5df499dcb" +PROTON_COMMIT := "31ac9046d1a8c95a2f4645b87bf0620a3e6bb8bc" .PHONY: build test test-ci generate-proto unit-test-ci integration-test vet coverage clean install lint @@ -66,4 +66,4 @@ install: ## install required dependencies go install github.com/bufbuild/buf/cmd/buf@v1.5.0 go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2.0 go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@v2.5.0 - go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.5.0 \ No newline at end of file + go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.5.0 diff --git a/client/cmd/commands.go b/client/cmd/commands.go index 8850dc7d46..23c4e8fea0 100644 --- a/client/cmd/commands.go +++ b/client/cmd/commands.go @@ -13,6 +13,7 @@ import ( "github.com/odpf/optimus/client/cmd/playground" "github.com/odpf/optimus/client/cmd/plugin" "github.com/odpf/optimus/client/cmd/project" + "github.com/odpf/optimus/client/cmd/replay" "github.com/odpf/optimus/client/cmd/resource" "github.com/odpf/optimus/client/cmd/scheduler" "github.com/odpf/optimus/client/cmd/secret" @@ -66,6 +67,7 @@ func New() *cli.Command { version.NewVersionCommand(), playground.NewPlaygroundCommand(), scheduler.NewSchedulerCommand(), + replay.NewReplayCommand(), // Will decide later, to add it server side or not plugin.NewPluginCommand(), diff --git a/client/cmd/replay/create.go b/client/cmd/replay/create.go new file mode 100644 index 0000000000..0b04a3c44c --- /dev/null +++ b/client/cmd/replay/create.go @@ -0,0 +1,159 @@ +package replay + +import ( + "errors" + "fmt" + "time" + + "github.com/odpf/salt/log" + "github.com/spf13/cobra" + "golang.org/x/net/context" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/odpf/optimus/client/cmd/internal" + "github.com/odpf/optimus/client/cmd/internal/connectivity" + "github.com/odpf/optimus/client/cmd/internal/logger" + "github.com/odpf/optimus/config" + pb "github.com/odpf/optimus/protos/odpf/optimus/core/v1beta1" +) + +const ( + replayTimeout = time.Minute * 1 + ISOTimeLayout = time.RFC3339 +) + +type createCommand struct { + logger log.Logger + configFilePath string + + parallel bool + description string + jobConfig string + + projectName string + namespaceName string + host string +} + +// CreateCommand initializes command for creating a replay request +func CreateCommand() *cobra.Command { + refresh := &createCommand{ + logger: logger.NewClientLogger(), + } + + cmd := &cobra.Command{ + Use: "create", + Short: "Run replay operation on a dag based on provided start and end time range", + Long: "This operation takes three arguments, first is DAG name[required]\nused in optimus specification, " + + "second is start time[required] of\nreplay, third is end time[optional] of replay. 
\nDate ranges are inclusive.", + Example: "optimus replay create <2023-01-01T02:30:00Z00:00> [2023-01-02T02:30:00Z00:00]", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) < 1 { + return errors.New("job name is required") + } + if len(args) < 2 { //nolint: gomnd + return errors.New("replay start time is required") + } + return nil + }, + RunE: refresh.RunE, + PreRunE: refresh.PreRunE, + } + + refresh.injectFlags(cmd) + return cmd +} + +func (r *createCommand) injectFlags(cmd *cobra.Command) { + // Config filepath flag + cmd.Flags().StringVarP(&r.configFilePath, "config", "c", config.EmptyPath, "File path for client configuration") + cmd.Flags().StringVarP(&r.namespaceName, "namespace-name", "n", "", "Name of the optimus namespace") + + cmd.Flags().BoolVarP(&r.parallel, "parallel", "", false, "Backfill job runs in parallel") + cmd.Flags().StringVarP(&r.description, "description", "d", "", "Description of why backfill is needed") + cmd.Flags().StringVarP(&r.jobConfig, "job-config", "", "", "additional job configurations") + + // Mandatory flags if config is not set + cmd.Flags().StringVarP(&r.projectName, "project-name", "p", "", "Name of the optimus project") + cmd.Flags().StringVar(&r.host, "host", "", "Optimus service endpoint url") +} + +func (r *createCommand) PreRunE(cmd *cobra.Command, _ []string) error { + conf, err := internal.LoadOptionalConfig(r.configFilePath) + if err != nil { + return err + } + + if conf == nil { + internal.MarkFlagsRequired(cmd, []string{"project-name", "host"}) + return nil + } + + if r.projectName == "" { + r.projectName = conf.Project.Name + } + if r.host == "" { + r.host = conf.Host + } + return nil +} + +func (r *createCommand) RunE(_ *cobra.Command, args []string) error { + jobName := args[0] + startTime := args[1] + endTime := args[1] + if len(args) >= 3 { //nolint: gomnd + endTime = args[2] + } + + replayID, err := r.createReplayRequest(jobName, startTime, endTime, r.jobConfig) + if err != nil { + return err + } + r.logger.Info("Replay request created with id %s", replayID) + return nil +} + +func (r *createCommand) createReplayRequest(jobName, startTimeStr, endTimeStr, jobConfig string) (string, error) { + conn, err := connectivity.NewConnectivity(r.host, replayTimeout) + if err != nil { + return "", err + } + defer conn.Close() + + replayService := pb.NewReplayServiceClient(conn.GetConnection()) + + startTime, err := getTimeProto(startTimeStr) + if err != nil { + return "", err + } + endTime, err := getTimeProto(endTimeStr) + if err != nil { + return "", err + } + respStream, err := replayService.Replay(conn.GetContext(), &pb.ReplayRequest{ + ProjectName: r.projectName, + JobName: jobName, + NamespaceName: r.namespaceName, + StartTime: startTime, + EndTime: endTime, + Parallel: r.parallel, + Description: r.description, + JobConfig: jobConfig, + }) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + r.logger.Error("Replay creation took too long, timing out") + } + return "", fmt.Errorf("replay request failed: %w", err) + } + return respStream.Id, nil +} + +func getTimeProto(timeStr string) (*timestamppb.Timestamp, error) { + parsedTime, err := time.Parse(ISOTimeLayout, timeStr) + if err != nil { + return nil, err + } + return timestamppb.New(parsedTime), nil +} diff --git a/client/cmd/replay/replay.go b/client/cmd/replay/replay.go new file mode 100644 index 0000000000..e64fc80dda --- /dev/null +++ b/client/cmd/replay/replay.go @@ -0,0 +1,21 @@ +package replay + +import ( + "github.com/spf13/cobra" +) + +// 
NewReplayCommand initializes command for replay +func NewReplayCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "replay", + Short: "replay related functions", + Annotations: map[string]string{ + "group:core": "false", + }, + } + + cmd.AddCommand( + CreateCommand(), + ) + return cmd +} diff --git a/config/config_server.go b/config/config_server.go index 984a961c3a..b50a34b80d 100644 --- a/config/config_server.go +++ b/config/config_server.go @@ -1,5 +1,7 @@ package config +import "time" + type ServerConfig struct { Version Version `mapstructure:"version"` Log LogConfig `mapstructure:"log"` @@ -8,6 +10,7 @@ type ServerConfig struct { Telemetry TelemetryConfig `mapstructure:"telemetry"` ResourceManagers []ResourceManager `mapstructure:"resource_managers"` Plugin PluginConfig `mapstructure:"plugin"` + Replay ReplayConfig `mapstructure:"replay"` } type Serve struct { @@ -47,3 +50,8 @@ type ResourceManagerConfigOptimus struct { type PluginConfig struct { Artifacts []string `mapstructure:"artifacts"` } + +// TODO: add worker interval +type ReplayConfig struct { + ReplayTimeout time.Duration `mapstructure:"replay_timeout" default:"3h"` +} diff --git a/config/loader_test.go b/config/loader_test.go index c878778c5d..54f2d391f0 100644 --- a/config/loader_test.go +++ b/config/loader_test.go @@ -7,6 +7,7 @@ import ( "path/filepath" "strings" "testing" + "time" saltConfig "github.com/odpf/salt/config" "github.com/spf13/afero" @@ -277,6 +278,8 @@ func (s *ConfigTestSuite) initExpectedServerConfig() { }, } s.expectedServerConfig.Plugin = config.PluginConfig{} + + s.expectedServerConfig.Replay.ReplayTimeout = time.Hour * 3 } func (*ConfigTestSuite) initServerConfigEnv() { diff --git a/core/job/handler/v1beta1/job.go b/core/job/handler/v1beta1/job.go index 10731966d3..cf5b27cbe2 100644 --- a/core/job/handler/v1beta1/job.go +++ b/core/job/handler/v1beta1/job.go @@ -240,7 +240,7 @@ func (jh *JobHandler) ListJobSpecification(ctx context.Context, req *pb.ListJobS func (*JobHandler) GetWindow(_ context.Context, req *pb.GetWindowRequest) (*pb.GetWindowResponse, error) { // TODO: the default version to be deprecated & made mandatory in future releases - version := 1 + version := 2 if err := req.GetScheduledAt().CheckValid(); err != nil { return nil, fmt.Errorf("%w: failed to parse schedule time %s", err, req.GetScheduledAt()) } diff --git a/core/scheduler/handler/v1beta1/replay.go b/core/scheduler/handler/v1beta1/replay.go new file mode 100644 index 0000000000..cca0da6ffa --- /dev/null +++ b/core/scheduler/handler/v1beta1/replay.go @@ -0,0 +1,83 @@ +package v1beta1 + +import ( + "fmt" + "strings" + + "github.com/google/uuid" + "github.com/odpf/salt/log" + "golang.org/x/net/context" + + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/tenant" + "github.com/odpf/optimus/internal/errors" + pb "github.com/odpf/optimus/protos/odpf/optimus/core/v1beta1" +) + +type ReplayService interface { + CreateReplay(ctx context.Context, tenant tenant.Tenant, jobName scheduler.JobName, config *scheduler.ReplayConfig) (replayID uuid.UUID, err error) +} + +type ReplayHandler struct { + l log.Logger + service ReplayService + + pb.UnimplementedReplayServiceServer +} + +func (h ReplayHandler) Replay(ctx context.Context, req *pb.ReplayRequest) (*pb.ReplayResponse, error) { + replayTenant, err := tenant.NewTenant(req.GetProjectName(), req.NamespaceName) + if err != nil { + return nil, errors.GRPCErr(err, "unable to start replay for "+req.GetJobName()) + } + + jobName, err := 
scheduler.JobNameFrom(req.GetJobName()) + if err != nil { + return nil, errors.GRPCErr(err, "unable to start replay for "+req.GetJobName()) + } + + if err = req.GetStartTime().CheckValid(); err != nil { + return nil, errors.GRPCErr(errors.InvalidArgument(scheduler.EntityJobRun, "invalid start_time"), "unable to start replay for "+req.GetJobName()) + } + + if req.GetEndTime() != nil { + if err = req.GetEndTime().CheckValid(); err != nil { + return nil, errors.GRPCErr(errors.InvalidArgument(scheduler.EntityJobRun, "invalid end_time"), "unable to start replay for "+req.GetJobName()) + } + } + + jobConfig := make(map[string]string) + if req.JobConfig != "" { + jobConfig, err = parseJobConfig(req.JobConfig) + if err != nil { + return nil, errors.GRPCErr(err, "unable to parse replay job config for "+req.JobName) + } + } + + replayConfig := scheduler.NewReplayConfig(req.GetStartTime().AsTime(), req.GetEndTime().AsTime(), req.Parallel, jobConfig, req.Description) + replayID, err := h.service.CreateReplay(ctx, replayTenant, jobName, replayConfig) + if err != nil { + return nil, errors.GRPCErr(err, "unable to start replay for "+req.GetJobName()) + } + + return &pb.ReplayResponse{Id: replayID.String()}, nil +} + +func parseJobConfig(jobConfig string) (map[string]string, error) { + configs := map[string]string{} + for _, config := range strings.Split(jobConfig, ",") { + keyValue := strings.Split(config, "=") + valueLen := 2 + if len(keyValue) != valueLen { + return nil, fmt.Errorf("error on job config value, %s", config) + } + key := strings.TrimSpace(strings.ToUpper(keyValue[0])) + value := keyValue[1] + configs[key] = value + } + return configs, nil +} + +func NewReplayHandler(l log.Logger, service ReplayService) *ReplayHandler { + return &ReplayHandler{l: l, service: service} +} diff --git a/core/scheduler/handler/v1beta1/replay_test.go b/core/scheduler/handler/v1beta1/replay_test.go new file mode 100644 index 0000000000..5d53b70727 --- /dev/null +++ b/core/scheduler/handler/v1beta1/replay_test.go @@ -0,0 +1,227 @@ +package v1beta1_test + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/google/uuid" + "github.com/odpf/salt/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/scheduler/handler/v1beta1" + "github.com/odpf/optimus/core/tenant" + pb "github.com/odpf/optimus/protos/odpf/optimus/core/v1beta1" +) + +func TestReplayHandler(t *testing.T) { + logger := log.NewNoop() + ctx := context.Background() + projectName := "a-data-proj" + namespaceName := "a-namespace" + jobTenant, _ := tenant.NewTenant(projectName, namespaceName) + jobName, _ := scheduler.JobNameFrom("a-job-name") + startTime := timestamppb.New(time.Date(2023, 0o1, 0o1, 13, 0, 0, 0, time.UTC)) + endTime := timestamppb.New(time.Date(2023, 0o1, 0o2, 13, 0, 0, 0, time.UTC)) + jobConfigStr := "EXECUTION_PROJECT=example_project,ANOTHER_CONFIG=example_value" + jobConfig := map[string]string{"EXECUTION_PROJECT": "example_project", "ANOTHER_CONFIG": "example_value"} + description := "sample backfill" + replayID := uuid.New() + + t.Run("CreateReplay", func(t *testing.T) { + t.Run("returns replay ID when able to create replay successfully", func(t *testing.T) { + service := new(mockReplayService) + replayHandler := v1beta1.NewReplayHandler(logger, service) + + req := &pb.ReplayRequest{ + ProjectName: projectName, + JobName: jobName.String(), + NamespaceName: 
namespaceName, + StartTime: startTime, + EndTime: endTime, + Parallel: false, + JobConfig: jobConfigStr, + Description: description, + } + replayConfig := scheduler.NewReplayConfig(req.StartTime.AsTime(), req.EndTime.AsTime(), false, jobConfig, description) + + service.On("CreateReplay", ctx, jobTenant, jobName, replayConfig).Return(replayID, nil) + + result, err := replayHandler.Replay(ctx, req) + assert.NoError(t, err) + assert.Equal(t, replayID.String(), result.Id) + }) + t.Run("returns replay ID when able to create replay successfully without overriding job config", func(t *testing.T) { + service := new(mockReplayService) + replayHandler := v1beta1.NewReplayHandler(logger, service) + + req := &pb.ReplayRequest{ + ProjectName: projectName, + JobName: jobName.String(), + NamespaceName: namespaceName, + StartTime: startTime, + EndTime: endTime, + Parallel: false, + Description: description, + } + replayConfig := scheduler.NewReplayConfig(req.StartTime.AsTime(), req.EndTime.AsTime(), false, map[string]string{}, description) + + service.On("CreateReplay", ctx, jobTenant, jobName, replayConfig).Return(replayID, nil) + + result, err := replayHandler.Replay(ctx, req) + assert.NoError(t, err) + assert.Equal(t, replayID.String(), result.Id) + }) + t.Run("returns error when unable to create tenant", func(t *testing.T) { + service := new(mockReplayService) + replayHandler := v1beta1.NewReplayHandler(logger, service) + + req := &pb.ReplayRequest{ + JobName: jobName.String(), + NamespaceName: namespaceName, + StartTime: startTime, + EndTime: endTime, + Parallel: false, + JobConfig: jobConfigStr, + Description: description, + } + + result, err := replayHandler.Replay(ctx, req) + assert.Error(t, err) + assert.Nil(t, result) + }) + t.Run("returns error when job name is invalid", func(t *testing.T) { + service := new(mockReplayService) + replayHandler := v1beta1.NewReplayHandler(logger, service) + + req := &pb.ReplayRequest{ + ProjectName: projectName, + NamespaceName: namespaceName, + StartTime: startTime, + EndTime: endTime, + Parallel: false, + JobConfig: jobConfigStr, + Description: description, + } + + result, err := replayHandler.Replay(ctx, req) + assert.Error(t, err) + assert.Nil(t, result) + }) + t.Run("returns error when start time is invalid", func(t *testing.T) { + service := new(mockReplayService) + replayHandler := v1beta1.NewReplayHandler(logger, service) + + req := &pb.ReplayRequest{ + ProjectName: projectName, + JobName: jobName.String(), + NamespaceName: namespaceName, + EndTime: endTime, + Parallel: false, + JobConfig: jobConfigStr, + Description: description, + } + + result, err := replayHandler.Replay(ctx, req) + assert.Error(t, err) + assert.Nil(t, result) + }) + t.Run("returns no error when end time is empty", func(t *testing.T) { + service := new(mockReplayService) + replayHandler := v1beta1.NewReplayHandler(logger, service) + + req := &pb.ReplayRequest{ + ProjectName: projectName, + JobName: jobName.String(), + NamespaceName: namespaceName, + StartTime: startTime, + Parallel: false, + JobConfig: jobConfigStr, + Description: description, + } + replayConfig := scheduler.NewReplayConfig(req.StartTime.AsTime(), req.EndTime.AsTime(), false, jobConfig, description) + + service.On("CreateReplay", ctx, jobTenant, jobName, replayConfig).Return(replayID, nil) + + result, err := replayHandler.Replay(ctx, req) + assert.NoError(t, err) + assert.Equal(t, replayID.String(), result.Id) + }) + t.Run("returns error when end time is present but invalid", func(t *testing.T) { + service := 
new(mockReplayService) + replayHandler := v1beta1.NewReplayHandler(logger, service) + + req := &pb.ReplayRequest{ + ProjectName: projectName, + JobName: jobName.String(), + NamespaceName: namespaceName, + StartTime: startTime, + EndTime: timestamppb.New(time.Date(-1, 13, 0o2, 13, 0, 0, 0, time.UTC)), + Parallel: false, + JobConfig: jobConfigStr, + Description: description, + } + replayConfig := scheduler.NewReplayConfig(req.StartTime.AsTime(), req.EndTime.AsTime(), false, jobConfig, description) + + service.On("CreateReplay", ctx, jobTenant, jobName, replayConfig).Return(replayID, nil) + + result, err := replayHandler.Replay(ctx, req) + assert.Error(t, err) + assert.Nil(t, result) + }) + t.Run("returns error when unable to create replay", func(t *testing.T) { + service := new(mockReplayService) + replayHandler := v1beta1.NewReplayHandler(logger, service) + + req := &pb.ReplayRequest{ + ProjectName: projectName, + JobName: jobName.String(), + NamespaceName: namespaceName, + StartTime: startTime, + EndTime: endTime, + Parallel: false, + JobConfig: jobConfigStr, + Description: description, + } + replayConfig := scheduler.NewReplayConfig(req.StartTime.AsTime(), req.EndTime.AsTime(), false, jobConfig, description) + + service.On("CreateReplay", ctx, jobTenant, jobName, replayConfig).Return(uuid.Nil, errors.New("internal error")) + + result, err := replayHandler.Replay(ctx, req) + assert.ErrorContains(t, err, "internal error") + assert.Nil(t, result) + }) + }) +} + +// mockReplayService is an autogenerated mock type for the ReplayService type +type mockReplayService struct { + mock.Mock +} + +// CreateReplay provides a mock function with given fields: ctx, _a1, jobName, config +func (_m *mockReplayService) CreateReplay(ctx context.Context, _a1 tenant.Tenant, jobName scheduler.JobName, config *scheduler.ReplayConfig) (uuid.UUID, error) { + ret := _m.Called(ctx, _a1, jobName, config) + + var r0 uuid.UUID + if rf, ok := ret.Get(0).(func(context.Context, tenant.Tenant, scheduler.JobName, *scheduler.ReplayConfig) uuid.UUID); ok { + r0 = rf(ctx, _a1, jobName, config) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(uuid.UUID) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, tenant.Tenant, scheduler.JobName, *scheduler.ReplayConfig) error); ok { + r1 = rf(ctx, _a1, jobName, config) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/core/scheduler/replay.go b/core/scheduler/replay.go new file mode 100644 index 0000000000..8cd3b1942f --- /dev/null +++ b/core/scheduler/replay.go @@ -0,0 +1,131 @@ +package scheduler + +import ( + "sort" + "strings" + "time" + + "github.com/google/uuid" + + "github.com/odpf/optimus/core/tenant" + "github.com/odpf/optimus/internal/errors" +) + +const ( + ReplayStateCreated ReplayState = "created" + + ReplayStateInProgress ReplayState = "in progress" + ReplayStateInvalid ReplayState = "invalid" + ReplayStatePartialReplayed ReplayState = "partial replayed" + ReplayStateReplayed ReplayState = "replayed" + + ReplayStateSuccess ReplayState = "success" + ReplayStateFailed ReplayState = "failed" +) + +type ReplayState string + +func ReplayStateFromString(state string) (ReplayState, error) { + switch strings.ToLower(state) { + case string(ReplayStateCreated): + return ReplayStateCreated, nil + case string(ReplayStateInProgress): + return ReplayStateInProgress, nil + case string(ReplayStateInvalid): + return ReplayStateInvalid, nil + case string(ReplayStatePartialReplayed): + return ReplayStatePartialReplayed, nil + case 
string(ReplayStateReplayed): + return ReplayStateReplayed, nil + case string(ReplayStateSuccess): + return ReplayStateSuccess, nil + case string(ReplayStateFailed): + return ReplayStateFailed, nil + default: + return "", errors.InvalidArgument(EntityJobRun, "invalid state for replay "+state) + } +} + +func (j ReplayState) String() string { + return string(j) +} + +type Replay struct { + id uuid.UUID + + jobName JobName + tenant tenant.Tenant + config *ReplayConfig + + state ReplayState + message string + + createdAt time.Time +} + +func (r *Replay) ID() uuid.UUID { + return r.id +} + +func (r *Replay) JobName() JobName { + return r.jobName +} + +func (r *Replay) Tenant() tenant.Tenant { + return r.tenant +} + +func (r *Replay) Config() *ReplayConfig { + return r.config +} + +func (r *Replay) State() ReplayState { + return r.state +} + +func (r *Replay) Message() string { + return r.message +} + +func (r *Replay) CreatedAt() time.Time { + return r.createdAt +} + +func NewReplayRequest(jobName JobName, tenant tenant.Tenant, config *ReplayConfig, state ReplayState) *Replay { + return &Replay{jobName: jobName, tenant: tenant, config: config, state: state} +} + +func NewReplay(id uuid.UUID, jobName JobName, tenant tenant.Tenant, config *ReplayConfig, state ReplayState, createdAt time.Time) *Replay { + return &Replay{id: id, jobName: jobName, tenant: tenant, config: config, state: state, createdAt: createdAt} +} + +type ReplayWithRun struct { + Replay *Replay + Runs []*JobRunStatus // TODO: JobRunStatus does not have `message/log` +} + +func (r *ReplayWithRun) GetFirstExecutableRun() *JobRunStatus { + sort.Slice(r.Runs, func(i, j int) bool { + return r.Runs[i].ScheduledAt.Before(r.Runs[j].ScheduledAt) + }) + return r.Runs[0] +} + +func (r *ReplayWithRun) GetLastExecutableRun() *JobRunStatus { + sort.Slice(r.Runs, func(i, j int) bool { + return r.Runs[i].ScheduledAt.After(r.Runs[j].ScheduledAt) + }) + return r.Runs[0] +} + +type ReplayConfig struct { + StartTime time.Time + EndTime time.Time + Parallel bool + JobConfig map[string]string + Description string +} + +func NewReplayConfig(startTime, endTime time.Time, parallel bool, jobConfig map[string]string, description string) *ReplayConfig { + return &ReplayConfig{StartTime: startTime.UTC(), EndTime: endTime.UTC(), Parallel: parallel, JobConfig: jobConfig, Description: description} +} diff --git a/core/scheduler/replay_test.go b/core/scheduler/replay_test.go new file mode 100644 index 0000000000..d30e0cde74 --- /dev/null +++ b/core/scheduler/replay_test.go @@ -0,0 +1,118 @@ +package scheduler_test + +import ( + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/tenant" +) + +func TestReplay(t *testing.T) { + replayID := uuid.New() + jobNameA, _ := scheduler.JobNameFrom("sample-job-A") + projName := tenant.ProjectName("proj") + namespaceName := tenant.ProjectName("ns1") + tnnt, _ := tenant.NewTenant(projName.String(), namespaceName.String()) + startTimeStr := "2023-01-02T00:00:00Z" + startTime, _ := time.Parse(scheduler.ISODateFormat, startTimeStr) + endTime := startTime.Add(48 * time.Hour) + replayDescription := "sample backfill" + replayJobConfig := map[string]string{"EXECUTION_PROJECT": "example_project"} + replayConfig := scheduler.NewReplayConfig(startTime, endTime, false, replayJobConfig, replayDescription) + scheduledTimeStr1 := "2023-01-02T12:00:00Z" + scheduledTime1, _ := time.Parse(scheduler.ISODateFormat, scheduledTimeStr1) + 
scheduledTime2 := scheduledTime1.Add(24 * time.Hour) + + t.Run("NewReplay", func(t *testing.T) { + createdTime := time.Now() + replay := scheduler.NewReplay(replayID, jobNameA, tnnt, replayConfig, scheduler.ReplayStateCreated, createdTime) + + assert.Equal(t, replayID, replay.ID()) + assert.Equal(t, jobNameA, replay.JobName()) + assert.Equal(t, tnnt, replay.Tenant()) + assert.Equal(t, replayConfig, replay.Config()) + assert.Equal(t, scheduler.ReplayStateCreated.String(), replay.State().String()) + assert.Equal(t, "", replay.Message()) + assert.Equal(t, createdTime, replay.CreatedAt()) + }) + + t.Run("NewReplayRequest", func(t *testing.T) { + replay := scheduler.NewReplayRequest(jobNameA, tnnt, replayConfig, scheduler.ReplayStateCreated) + + assert.Equal(t, uuid.Nil, replay.ID()) + assert.Equal(t, jobNameA, replay.JobName()) + assert.Equal(t, tnnt, replay.Tenant()) + assert.Equal(t, replayConfig, replay.Config()) + assert.Equal(t, scheduler.ReplayStateCreated.String(), replay.State().String()) + assert.Equal(t, "", replay.Message()) + }) + + t.Run("ReplayWithRun", func(t *testing.T) { + firstRun := &scheduler.JobRunStatus{ + ScheduledAt: scheduledTime1, + State: scheduler.StatePending, + } + secondRun := &scheduler.JobRunStatus{ + ScheduledAt: scheduledTime2, + State: scheduler.StatePending, + } + + t.Run("GetFirstExecutableRun", func(t *testing.T) { + replay := scheduler.NewReplay(replayID, jobNameA, tnnt, replayConfig, scheduler.ReplayStateCreated, time.Now()) + replayWithRun := &scheduler.ReplayWithRun{ + Replay: replay, + Runs: []*scheduler.JobRunStatus{ + firstRun, + secondRun, + }, + } + firstExecutableRun := replayWithRun.GetFirstExecutableRun() + assert.Equal(t, firstRun, firstExecutableRun) + }) + t.Run("GetLastExecutableRun", func(t *testing.T) { + replay := scheduler.NewReplay(replayID, jobNameA, tnnt, replayConfig, scheduler.ReplayStateCreated, time.Now()) + replayWithRun := &scheduler.ReplayWithRun{ + Replay: replay, + Runs: []*scheduler.JobRunStatus{ + firstRun, + secondRun, + }, + } + lastExecutableRun := replayWithRun.GetLastExecutableRun() + assert.Equal(t, secondRun, lastExecutableRun) + }) + }) + + t.Run("ReplayStateFromString", func(t *testing.T) { + expectationsMap := map[string]scheduler.ReplayState{ + "created": scheduler.ReplayStateCreated, + "CREATED": scheduler.ReplayStateCreated, + "in progress": scheduler.ReplayStateInProgress, + "IN PROGRESS": scheduler.ReplayStateInProgress, + "invalid": scheduler.ReplayStateInvalid, + "INVALID": scheduler.ReplayStateInvalid, + "partial replayed": scheduler.ReplayStatePartialReplayed, + "PARTIAL REPLAYED": scheduler.ReplayStatePartialReplayed, + "replayed": scheduler.ReplayStateReplayed, + "REPLAYED": scheduler.ReplayStateReplayed, + "success": scheduler.ReplayStateSuccess, + "SUCCESS": scheduler.ReplayStateSuccess, + "failed": scheduler.ReplayStateFailed, + "FAILED": scheduler.ReplayStateFailed, + } + for input, expectedState := range expectationsMap { + respState, err := scheduler.ReplayStateFromString(input) + assert.Nil(t, err) + assert.Equal(t, expectedState, respState) + } + + respState, err := scheduler.ReplayStateFromString("unregisteredState") + assert.NotNil(t, err) + assert.EqualError(t, err, "invalid argument for entity jobRun: invalid state for replay unregisteredState") + assert.Equal(t, scheduler.ReplayState(""), respState) + }) +} diff --git a/core/scheduler/service/deployment_service_test.go b/core/scheduler/service/deployment_service_test.go index ca4b4eaebc..300a40aef0 100644 --- 
a/core/scheduler/service/deployment_service_test.go +++ b/core/scheduler/service/deployment_service_test.go @@ -77,7 +77,7 @@ func TestDeploymentService(t *testing.T) { defer jobRepo.AssertExpectations(t) runService := service.NewJobRunService(nil, - jobRepo, nil, nil, nil, nil, nil) + jobRepo, nil, nil, nil, nil, nil, nil) err := runService.UploadToScheduler(ctx, proj1Name) assert.NotNil(t, err) @@ -93,7 +93,7 @@ func TestDeploymentService(t *testing.T) { defer priorityResolver.AssertExpectations(t) runService := service.NewJobRunService(nil, - jobRepo, nil, nil, nil, priorityResolver, nil) + jobRepo, nil, nil, nil, nil, priorityResolver, nil) err := runService.UploadToScheduler(ctx, proj1Name) assert.NotNil(t, err) @@ -113,7 +113,7 @@ func TestDeploymentService(t *testing.T) { Return(fmt.Errorf("DeployJobs tnnt1 error")) defer mScheduler.AssertExpectations(t) - runService := service.NewJobRunService(logger, jobRepo, nil, nil, + runService := service.NewJobRunService(logger, jobRepo, nil, nil, nil, mScheduler, priorityResolver, nil) err := runService.UploadToScheduler(ctx, proj1Name) @@ -141,7 +141,7 @@ func TestDeploymentService(t *testing.T) { mScheduler.On("DeleteJobs", ctx, tnnt2, []string{"job4-to-delete"}).Return(nil) defer mScheduler.AssertExpectations(t) - runService := service.NewJobRunService(logger, jobRepo, nil, nil, + runService := service.NewJobRunService(logger, jobRepo, nil, nil, nil, mScheduler, priorityResolver, nil) err := runService.UploadToScheduler(ctx, proj1Name) @@ -166,7 +166,7 @@ func TestDeploymentService(t *testing.T) { mScheduler.On("DeleteJobs", ctx, tnnt2, []string{"job4-to-delete"}).Return(nil) defer mScheduler.AssertExpectations(t) - runService := service.NewJobRunService(logger, jobRepo, nil, nil, + runService := service.NewJobRunService(logger, jobRepo, nil, nil, nil, mScheduler, priorityResolver, nil) err := runService.UploadToScheduler(ctx, proj1Name) diff --git a/core/scheduler/service/job_run_service.go b/core/scheduler/service/job_run_service.go index a163655a77..56771e8322 100644 --- a/core/scheduler/service/job_run_service.go +++ b/core/scheduler/service/job_run_service.go @@ -40,6 +40,10 @@ type JobRunRepository interface { UpdateMonitoring(ctx context.Context, jobRunID uuid.UUID, monitoring map[string]any) error } +type JobReplayRepository interface { + GetReplayJobConfig(ctx context.Context, jobTenant tenant.Tenant, jobName scheduler.JobName, scheduledAt time.Time) (map[string]string, error) +} + type OperatorRunRepository interface { GetOperatorRun(ctx context.Context, operatorName string, operator scheduler.OperatorType, jobRunID uuid.UUID) (*scheduler.OperatorRun, error) CreateOperatorRun(ctx context.Context, operatorName string, operator scheduler.OperatorType, jobRunID uuid.UUID, startTime time.Time) error @@ -64,6 +68,7 @@ type Scheduler interface { type JobRunService struct { l log.Logger repo JobRunRepository + replayRepo JobReplayRepository operatorRunRepo OperatorRunRepository scheduler Scheduler jobRepo JobRepository @@ -90,6 +95,15 @@ func (s *JobRunService) JobRunInput(ctx context.Context, projectName tenant.Proj } else { executedAt = jobRun.StartTime } + // Additional task config from existing replay + replayJobConfig, err := s.replayRepo.GetReplayJobConfig(ctx, job.Tenant, job.Name, config.ScheduledAt) + if err != nil { + return nil, err + } + for k, v := range replayJobConfig { + job.Task.Config[k] = v + } + return s.compiler.Compile(ctx, job, config, executedAt) } @@ -400,7 +414,7 @@ func (s *JobRunService) UpdateJobState(ctx 
context.Context, event *scheduler.Eve } } -func NewJobRunService(logger log.Logger, jobRepo JobRepository, jobRunRepo JobRunRepository, +func NewJobRunService(logger log.Logger, jobRepo JobRepository, jobRunRepo JobRunRepository, replayRepo JobReplayRepository, operatorRunRepo OperatorRunRepository, scheduler Scheduler, resolver PriorityResolver, compiler JobInputCompiler, ) *JobRunService { return &JobRunService{ @@ -408,6 +422,7 @@ func NewJobRunService(logger log.Logger, jobRepo JobRepository, jobRunRepo JobRu repo: jobRunRepo, operatorRunRepo: operatorRunRepo, scheduler: scheduler, + replayRepo: replayRepo, jobRepo: jobRepo, priorityResolver: resolver, compiler: compiler, diff --git a/core/scheduler/service/job_run_service_test.go b/core/scheduler/service/job_run_service_test.go index d7be7bd5a7..e78dbf1b80 100644 --- a/core/scheduler/service/job_run_service_test.go +++ b/core/scheduler/service/job_run_service_test.go @@ -37,7 +37,7 @@ func TestJobRunService(t *testing.T) { t.Run("should reject unregistered events", func(t *testing.T) { runService := service.NewJobRunService(logger, - nil, nil, nil, nil, nil, nil) + nil, nil, nil, nil, nil, nil, nil) event := &scheduler.Event{ JobName: jobName, @@ -60,7 +60,7 @@ func TestJobRunService(t *testing.T) { defer jobRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, jobRunRepository, nil, nil, nil, nil) + jobRepo, jobRunRepository, nil, nil, nil, nil, nil) event := &scheduler.Event{ JobName: jobName, @@ -105,7 +105,7 @@ func TestJobRunService(t *testing.T) { defer jobRunRepository.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, jobRunRepository, nil, nil, nil, nil) + jobRepo, jobRunRepository, nil, nil, nil, nil, nil) event := &scheduler.Event{ JobName: jobName, @@ -180,7 +180,7 @@ func TestJobRunService(t *testing.T) { defer operatorRunRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, jobRunRepo, operatorRunRepo, nil, nil, nil) + jobRepo, jobRunRepo, nil, operatorRunRepo, nil, nil, nil) err = runService.UpdateJobState(ctx, event) assert.Nil(t, err) @@ -219,7 +219,7 @@ func TestJobRunService(t *testing.T) { defer jobRunRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - nil, jobRunRepo, nil, nil, nil, nil) + nil, jobRunRepo, nil, nil, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.Nil(t, err) @@ -279,7 +279,7 @@ func TestJobRunService(t *testing.T) { defer jobRunRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, jobRunRepo, nil, nil, nil, nil) + jobRepo, jobRunRepo, nil, nil, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.NotNil(t, err) @@ -292,7 +292,7 @@ func TestJobRunService(t *testing.T) { defer jobRunRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, jobRunRepo, nil, nil, nil, nil) + jobRepo, jobRunRepo, nil, nil, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.NotNil(t, err) @@ -305,7 +305,7 @@ func TestJobRunService(t *testing.T) { defer jobRunRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, jobRunRepo, nil, nil, nil, nil) + jobRepo, jobRunRepo, nil, nil, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.NotNil(t, err) @@ -321,7 +321,7 @@ func TestJobRunService(t *testing.T) { defer jobRunRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, jobRunRepo, nil, nil, nil, nil) + jobRepo, jobRunRepo, nil, nil, 
nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.Nil(t, err) @@ -347,7 +347,7 @@ func TestJobRunService(t *testing.T) { defer jobRunRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - nil, jobRunRepo, nil, nil, nil, nil) + nil, jobRunRepo, nil, nil, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.NotNil(t, err) @@ -370,7 +370,7 @@ func TestJobRunService(t *testing.T) { defer jobRunRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - nil, jobRunRepo, nil, nil, nil, nil) + nil, jobRunRepo, nil, nil, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.NotNil(t, err) @@ -393,7 +393,7 @@ func TestJobRunService(t *testing.T) { defer jobRunRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - nil, jobRunRepo, nil, nil, nil, nil) + nil, jobRunRepo, nil, nil, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.NotNil(t, err) @@ -430,7 +430,7 @@ func TestJobRunService(t *testing.T) { defer operatorRunRepository.AssertExpectations(t) runService := service.NewJobRunService(logger, - nil, jobRunRepo, operatorRunRepository, nil, nil, nil) + nil, jobRunRepo, nil, operatorRunRepository, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.Nil(t, err) @@ -478,7 +478,7 @@ func TestJobRunService(t *testing.T) { operatorRunRepository.On("CreateOperatorRun", ctx, event.OperatorName, scheduler.OperatorTask, jobRun.ID, eventTime).Return(fmt.Errorf("some error in creating operator run")) defer operatorRunRepository.AssertExpectations(t) runService := service.NewJobRunService(logger, - nil, jobRunRepo, operatorRunRepository, nil, nil, nil) + nil, jobRunRepo, nil, operatorRunRepository, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.NotNil(t, err) @@ -491,7 +491,7 @@ func TestJobRunService(t *testing.T) { operatorRunRepository.On("GetOperatorRun", ctx, event.OperatorName, scheduler.OperatorTask, jobRun.ID).Return(nil, fmt.Errorf("some error in getting operator run")).Once() defer operatorRunRepository.AssertExpectations(t) runService := service.NewJobRunService(logger, - nil, jobRunRepo, operatorRunRepository, nil, nil, nil) + nil, jobRunRepo, nil, operatorRunRepository, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.NotNil(t, err) @@ -503,7 +503,7 @@ func TestJobRunService(t *testing.T) { operatorRunRepository.On("UpdateOperatorRun", ctx, scheduler.OperatorTask, operatorRun.ID, eventTime, scheduler.StateSuccess).Return(nil) defer operatorRunRepository.AssertExpectations(t) runService := service.NewJobRunService(logger, - nil, jobRunRepo, operatorRunRepository, nil, nil, nil) + nil, jobRunRepo, nil, operatorRunRepository, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.Nil(t, err) @@ -529,7 +529,7 @@ func TestJobRunService(t *testing.T) { defer jobRunRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - nil, jobRunRepo, nil, nil, nil, nil) + nil, jobRunRepo, nil, nil, nil, nil, nil) err := runService.UpdateJobState(ctx, event) assert.NotNil(t, err) @@ -566,7 +566,7 @@ func TestJobRunService(t *testing.T) { // operatorRunRepository.On("UpdateOperatorRun", ctx, scheduler.OperatorSensor, operatorRun.ID, eventTime, "success").Return(nil) defer operatorRunRepository.AssertExpectations(t) runService := service.NewJobRunService(logger, - nil, jobRunRepo, operatorRunRepository, nil, nil, nil) + nil, jobRunRepo, nil, operatorRunRepository, nil, nil, nil) err := runService.UpdateJobState(ctx, 
event) assert.NotNil(t, err) @@ -582,7 +582,7 @@ func TestJobRunService(t *testing.T) { defer jobRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, nil, nil, nil, nil, nil) + jobRepo, nil, nil, nil, nil, nil, nil) executorInput, err := runService.JobRunInput(ctx, projName, jobName, scheduler.RunConfig{}) assert.Nil(t, executorInput) assert.NotNil(t, err) @@ -593,6 +593,9 @@ func TestJobRunService(t *testing.T) { job := scheduler.Job{ Name: jobName, Tenant: tnnt, + Task: &scheduler.Task{ + Config: map[string]string{}, + }, } someScheduleTime := todayDate.Add(time.Hour * 24 * -1) @@ -624,14 +627,20 @@ func TestJobRunService(t *testing.T) { "someKey": "someValue", }, } + jobToCompile := job + jobToCompile.Task.Config["EXECUTION_PROJECT"] = "example" + + jobReplayRepo := new(ReplayRepository) + jobReplayRepo.On("GetReplayJobConfig", ctx, tnnt, jobName, someScheduleTime).Return(map[string]string{"EXECUTION_PROJECT": "example"}, nil) + defer jobReplayRepo.AssertExpectations(t) jobInputCompiler := new(mockJobInputCompiler) - jobInputCompiler.On("Compile", ctx, &job, runConfig, executedAt). + jobInputCompiler.On("Compile", ctx, &jobToCompile, runConfig, executedAt). Return(&dummyExecutorInput, nil) defer jobInputCompiler.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, jobRunRepo, nil, nil, nil, jobInputCompiler) + jobRepo, jobRunRepo, jobReplayRepo, nil, nil, nil, jobInputCompiler) executorInput, err := runService.JobRunInput(ctx, projName, jobName, runConfig) assert.Equal(t, &dummyExecutorInput, executorInput) @@ -642,6 +651,9 @@ func TestJobRunService(t *testing.T) { job := scheduler.Job{ Name: jobName, Tenant: tnnt, + Task: &scheduler.Task{ + Config: map[string]string{}, + }, } someScheduleTime := todayDate.Add(time.Hour * 24 * -1) @@ -675,13 +687,20 @@ func TestJobRunService(t *testing.T) { }, } + jobToCompile := job + jobToCompile.Task.Config["EXECUTION_PROJECT"] = "example" + + jobReplayRepo := new(ReplayRepository) + jobReplayRepo.On("GetReplayJobConfig", ctx, tnnt, jobName, someScheduleTime).Return(map[string]string{"EXECUTION_PROJECT": "example"}, nil) + defer jobReplayRepo.AssertExpectations(t) + jobInputCompiler := new(mockJobInputCompiler) - jobInputCompiler.On("Compile", ctx, &job, runConfig, executedAt). + jobInputCompiler.On("Compile", ctx, &jobToCompile, runConfig, executedAt). Return(&dummyExecutorInput, nil) defer jobInputCompiler.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, jobRunRepo, nil, nil, nil, jobInputCompiler) + jobRepo, jobRunRepo, jobReplayRepo, nil, nil, nil, jobInputCompiler) executorInput, err := runService.JobRunInput(ctx, projName, jobName, runConfig) assert.Equal(t, &dummyExecutorInput, executorInput) @@ -692,6 +711,9 @@ func TestJobRunService(t *testing.T) { job := scheduler.Job{ Name: jobName, Tenant: tnnt, + Task: &scheduler.Task{ + Config: map[string]string{}, + }, } someScheduleTime := todayDate.Add(time.Hour * 24 * -1) @@ -718,13 +740,20 @@ func TestJobRunService(t *testing.T) { }, } + jobToCompile := job + jobToCompile.Task.Config["EXECUTION_PROJECT"] = "example" + + jobReplayRepo := new(ReplayRepository) + jobReplayRepo.On("GetReplayJobConfig", ctx, tnnt, jobName, someScheduleTime).Return(map[string]string{"EXECUTION_PROJECT": "example"}, nil) + defer jobReplayRepo.AssertExpectations(t) + jobInputCompiler := new(mockJobInputCompiler) - jobInputCompiler.On("Compile", ctx, &job, runConfig, someScheduleTime). 
+ jobInputCompiler.On("Compile", ctx, &jobToCompile, runConfig, someScheduleTime). Return(&dummyExecutorInput, nil) defer jobInputCompiler.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, jobRunRepo, nil, nil, nil, jobInputCompiler) + jobRepo, jobRunRepo, jobReplayRepo, nil, nil, nil, jobInputCompiler) executorInput, err := runService.JobRunInput(ctx, projName, jobName, runConfig) assert.Equal(t, &dummyExecutorInput, executorInput) @@ -735,6 +764,9 @@ func TestJobRunService(t *testing.T) { job := scheduler.Job{ Name: jobName, Tenant: tnnt, + Task: &scheduler.Task{ + Config: map[string]string{}, + }, } someScheduleTime := todayDate.Add(time.Hour * 24 * -1) @@ -760,13 +792,20 @@ func TestJobRunService(t *testing.T) { "someKey": "someValue", }, } + jobToCompile := job + jobToCompile.Task.Config["EXECUTION_PROJECT"] = "example" + + jobReplayRepo := new(ReplayRepository) + jobReplayRepo.On("GetReplayJobConfig", ctx, tnnt, jobName, someScheduleTime).Return(map[string]string{"EXECUTION_PROJECT": "example"}, nil) + defer jobReplayRepo.AssertExpectations(t) + jobInputCompiler := new(mockJobInputCompiler) - jobInputCompiler.On("Compile", ctx, &job, runConfig, someScheduleTime). + jobInputCompiler.On("Compile", ctx, &jobToCompile, runConfig, someScheduleTime). Return(&dummyExecutorInput, nil) defer jobInputCompiler.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, jobRunRepo, nil, nil, nil, jobInputCompiler) + jobRepo, jobRunRepo, jobReplayRepo, nil, nil, nil, jobInputCompiler) executorInput, err := runService.JobRunInput(ctx, projName, jobName, runConfig) assert.Nil(t, err) @@ -799,7 +838,7 @@ func TestJobRunService(t *testing.T) { defer jobRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, nil, nil, nil, nil, nil) + jobRepo, nil, nil, nil, nil, nil, nil) returnedRuns, err := runService.GetJobRuns(ctx, projName, jobName, criteria) assert.NotNil(t, err) assert.EqualError(t, err, "unable to get job details from DB for jobName: sample_select, project:proj, error:some error in get job details ") @@ -837,7 +876,7 @@ func TestJobRunService(t *testing.T) { defer jobRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, nil, nil, sch, nil, nil) + jobRepo, nil, nil, nil, sch, nil, nil) returnedRuns, err := runService.GetJobRuns(ctx, projName, jobName, criteria) assert.Nil(t, err) assert.Nil(t, nil, returnedRuns) @@ -972,7 +1011,7 @@ func TestJobRunService(t *testing.T) { jobRepo.On("GetJobDetails", ctx, projName, jobName).Return(&jobWithDetails, nil) defer jobRepo.AssertExpectations(t) runService := service.NewJobRunService(logger, - jobRepo, nil, nil, + jobRepo, nil, nil, nil, sch, nil, nil) returnedRuns, err := runService.GetJobRuns(ctx, projName, jobName, scenario.input) assert.Nil(t, err) @@ -1009,7 +1048,7 @@ func TestJobRunService(t *testing.T) { jobRepo.On("GetJobDetails", ctx, projName, jobName).Return(&jobWithDetails, nil) defer jobRepo.AssertExpectations(t) - runService := service.NewJobRunService(logger, jobRepo, nil, nil, nil, nil, nil) + runService := service.NewJobRunService(logger, jobRepo, nil, nil, nil, nil, nil, nil) returnedRuns, err := runService.GetJobRuns(ctx, projName, jobName, jobQuery) assert.NotNil(t, err) assert.EqualError(t, err, "invalid date range") @@ -1044,7 +1083,7 @@ func TestJobRunService(t *testing.T) { jobRepo.On("GetJobDetails", ctx, projName, jobName).Return(&jobWithDetails, nil) defer jobRepo.AssertExpectations(t) - runService := 
service.NewJobRunService(logger, jobRepo, nil, nil, nil, nil, nil) + runService := service.NewJobRunService(logger, jobRepo, nil, nil, nil, nil, nil, nil) returnedRuns, err := runService.GetJobRuns(ctx, projName, jobName, jobQuery) assert.NotNil(t, err) assert.EqualError(t, err, "unable to parse job cron interval expected exactly 5 fields, found 2: [invalid interval]") @@ -1078,7 +1117,7 @@ func TestJobRunService(t *testing.T) { jobRepo.On("GetJobDetails", ctx, projName, jobName).Return(&jobWithDetails, nil) defer jobRepo.AssertExpectations(t) - runService := service.NewJobRunService(logger, jobRepo, nil, nil, nil, nil, nil) + runService := service.NewJobRunService(logger, jobRepo, nil, nil, nil, nil, nil, nil) returnedRuns, err := runService.GetJobRuns(ctx, projName, jobName, jobQuery) assert.NotNil(t, err) assert.EqualError(t, err, "job schedule interval not found") @@ -1110,7 +1149,7 @@ func TestJobRunService(t *testing.T) { jobRepo := new(JobRepository) jobRepo.On("GetJobDetails", ctx, projName, jobName).Return(&jobWithDetails, nil) defer jobRepo.AssertExpectations(t) - runService := service.NewJobRunService(logger, jobRepo, nil, nil, nil, nil, nil) + runService := service.NewJobRunService(logger, jobRepo, nil, nil, nil, nil, nil, nil) returnedRuns, err := runService.GetJobRuns(ctx, projName, jobName, jobQuery) assert.NotNil(t, err) assert.EqualError(t, err, "job schedule startDate not found in job fetched from DB") @@ -1153,7 +1192,7 @@ func TestJobRunService(t *testing.T) { jobRepo.On("GetJobDetails", ctx, projName, jobName).Return(&jobWithDetails, nil) defer jobRepo.AssertExpectations(t) - runService := service.NewJobRunService(logger, jobRepo, nil, nil, sch, nil, nil) + runService := service.NewJobRunService(logger, jobRepo, nil, nil, nil, sch, nil, nil) returnedRuns, err := runService.GetJobRuns(ctx, projName, jobName, criteria) assert.Nil(t, err) assert.Equal(t, runs, returnedRuns) diff --git a/core/scheduler/service/replay_manager.go b/core/scheduler/service/replay_manager.go new file mode 100644 index 0000000000..69ebad5e2f --- /dev/null +++ b/core/scheduler/service/replay_manager.go @@ -0,0 +1,97 @@ +package service + +import ( + "time" + + "github.com/odpf/salt/log" + "github.com/robfig/cron/v3" + "golang.org/x/net/context" + + "github.com/odpf/optimus/config" + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/internal/errors" +) + +const ( + syncInterval = "@every 1m" +) + +type ReplayManager struct { + l log.Logger + + replayRepository ReplayRepository + replayWorker Worker + + schedule *cron.Cron + Now func() time.Time + + config config.ReplayConfig +} + +func NewReplayManager(l log.Logger, replayRepository ReplayRepository, replayWorker Worker, now func() time.Time, config config.ReplayConfig) *ReplayManager { + return &ReplayManager{ + l: l, + replayRepository: replayRepository, + replayWorker: replayWorker, + Now: now, + config: config, + schedule: cron.New(cron.WithChain( + cron.SkipIfStillRunning(cron.DefaultLogger), + )), + } +} + +type Worker interface { + Process(*scheduler.ReplayWithRun) +} + +func (m ReplayManager) Initialize() { + if m.schedule != nil { + _, err := m.schedule.AddFunc(syncInterval, m.StartReplayLoop) + if err != nil { + m.l.Error("Failed to sync replay", "error", err) + } + m.schedule.Start() + } +} + +func (m ReplayManager) StartReplayLoop() { + ctx := context.Background() + + // Cancel timed out replay with status [created, in progress, partial replayed, replayed] + m.checkTimedOutReplay(ctx) + + // Fetch created, in 
progress, and replayed request + replayToExecute, err := m.replayRepository.GetReplayToExecute(ctx) + if err != nil { + if errors.IsErrorType(err, errors.ErrNotFound) { + m.l.Debug("no replay request found to execute") + } else { + m.l.Error("unable to get replay requests to execute: %w", err) + } + return + } + go m.replayWorker.Process(replayToExecute) +} + +func (m ReplayManager) checkTimedOutReplay(ctx context.Context) { + onGoingReplays, err := m.replayRepository.GetReplayRequestsByStatus(ctx, []scheduler.ReplayState{ + scheduler.ReplayStateCreated, + scheduler.ReplayStateInProgress, scheduler.ReplayStatePartialReplayed, scheduler.ReplayStateReplayed, + }) + if err != nil { + m.l.Error("unable to get on going replay") + } + + for _, replay := range onGoingReplays { + runningTime := m.Now().Sub(replay.CreatedAt()) + if runningTime < m.config.ReplayTimeout { + continue + } + message := "replay timed out" + if err := m.replayRepository.UpdateReplayStatus(ctx, replay.ID(), scheduler.ReplayStateFailed, message); err != nil { + m.l.Error("unable to mark replay %s as failed due to time out", replay.ID()) + } + m.l.Info("replay %s timed out. marked as failed.", replay.ID()) + } +} diff --git a/core/scheduler/service/replay_manager_test.go b/core/scheduler/service/replay_manager_test.go new file mode 100644 index 0000000000..1af1588ae4 --- /dev/null +++ b/core/scheduler/service/replay_manager_test.go @@ -0,0 +1,70 @@ +package service_test + +import ( + "errors" + "testing" + "time" + + "github.com/google/uuid" + "github.com/odpf/salt/log" + "golang.org/x/net/context" + + "github.com/odpf/optimus/config" + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/scheduler/service" + "github.com/odpf/optimus/core/tenant" +) + +func TestReplayManager(t *testing.T) { + ctx := context.Background() + logger := log.NewNoop() + currentTime := func() time.Time { return time.Now() } + conf := config.ReplayConfig{ReplayTimeout: time.Hour * 3} + replaysToCheck := []scheduler.ReplayState{ + scheduler.ReplayStateCreated, scheduler.ReplayStateInProgress, + scheduler.ReplayStatePartialReplayed, scheduler.ReplayStateReplayed, + } + replayID := uuid.New() + jobName := scheduler.JobName("sample_select") + replayStartTimeStr := "2023-01-03T12:00:00Z" + replayStartTime, _ := time.Parse(scheduler.ISODateFormat, replayStartTimeStr) + replayEndTime := replayStartTime.Add(24 * time.Hour) + replayDescription := "for backfill" + replayReqConf := scheduler.NewReplayConfig(replayStartTime, replayEndTime, false, map[string]string{}, replayDescription) + projName := tenant.ProjectName("proj") + namespaceName := tenant.ProjectName("ns1") + tnnt, _ := tenant.NewTenant(projName.String(), namespaceName.String()) + + t.Run("StartReplayLoop", func(t *testing.T) { + t.Run("should not proceed on the timeout process if unable to get replay requests by status", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + err := errors.New("internal error") + replayRepository.On("GetReplayRequestsByStatus", ctx, replaysToCheck).Return(nil, err) + replayRepository.On("GetReplayToExecute", ctx).Return(nil, err) + + replayManager := service.NewReplayManager(logger, replayRepository, nil, currentTime, conf) + replayManager.StartReplayLoop() + }) + t.Run("should mark replay request as failed if it is timed out", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + replayCreatedTime1 := time.Now().Add(-24 * 
time.Hour) + replayCreatedTime2 := time.Now().Add(-1 * time.Hour) + + replayReq1 := scheduler.NewReplay(replayID, jobName, tnnt, replayReqConf, scheduler.ReplayStateInProgress, replayCreatedTime1) + replayReq2 := scheduler.NewReplay(uuid.New(), "other_job", tnnt, replayReqConf, scheduler.ReplayStateInProgress, replayCreatedTime2) + + replayRepository.On("GetReplayRequestsByStatus", ctx, replaysToCheck).Return([]*scheduler.Replay{replayReq1, replayReq2}, nil) + replayRepository.On("UpdateReplayStatus", ctx, replayID, scheduler.ReplayStateFailed, "replay timed out").Return(nil).Once() + + err := errors.New("internal error") + replayRepository.On("GetReplayToExecute", ctx).Return(nil, err) + + replayManager := service.NewReplayManager(logger, replayRepository, nil, currentTime, conf) + replayManager.StartReplayLoop() + }) + }) +} diff --git a/core/scheduler/service/replay_service.go b/core/scheduler/service/replay_service.go new file mode 100644 index 0000000000..20fb897f1d --- /dev/null +++ b/core/scheduler/service/replay_service.go @@ -0,0 +1,56 @@ +package service + +import ( + "fmt" + + "github.com/google/uuid" + "golang.org/x/net/context" + + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/tenant" + "github.com/odpf/optimus/internal/lib/cron" +) + +type ReplayRepository interface { + RegisterReplay(ctx context.Context, replay *scheduler.Replay, runs []*scheduler.JobRunStatus) (uuid.UUID, error) + UpdateReplay(ctx context.Context, replayID uuid.UUID, state scheduler.ReplayState, runs []*scheduler.JobRunStatus, message string) error + UpdateReplayStatus(ctx context.Context, replayID uuid.UUID, state scheduler.ReplayState, message string) error + + GetReplayToExecute(context.Context) (*scheduler.ReplayWithRun, error) + GetReplayRequestsByStatus(ctx context.Context, statusList []scheduler.ReplayState) ([]*scheduler.Replay, error) +} + +type ReplayValidator interface { + Validate(ctx context.Context, replayRequest *scheduler.Replay, jobCron *cron.ScheduleSpec) error +} + +type ReplayService struct { + replayRepo ReplayRepository + jobRepo JobRepository + + validator ReplayValidator +} + +func (r ReplayService) CreateReplay(ctx context.Context, tenant tenant.Tenant, jobName scheduler.JobName, config *scheduler.ReplayConfig) (replayID uuid.UUID, err error) { + subjectJob, err := r.jobRepo.GetJobDetails(ctx, tenant.ProjectName(), jobName) + if err != nil { + return uuid.Nil, fmt.Errorf("unable to get job details from DB for jobName: %s, project:%s, error:%w ", jobName, tenant.ProjectName().String(), err) + } + + jobCron, err := cron.ParseCronSchedule(subjectJob.Schedule.Interval) + if err != nil { + return uuid.Nil, fmt.Errorf("encountered unexpected error when parsing job cron interval for job %s: %w", jobName, err) + } + + replayReq := scheduler.NewReplayRequest(jobName, tenant, config, scheduler.ReplayStateCreated) + if err := r.validator.Validate(ctx, replayReq, jobCron); err != nil { + return uuid.Nil, err + } + + runs := getExpectedRuns(jobCron, config.StartTime, config.EndTime) + return r.replayRepo.RegisterReplay(ctx, replayReq, runs) +} + +func NewReplayService(replayRepo ReplayRepository, jobRepo JobRepository, validator ReplayValidator) *ReplayService { + return &ReplayService{replayRepo: replayRepo, jobRepo: jobRepo, validator: validator} +} diff --git a/core/scheduler/service/replay_service_test.go b/core/scheduler/service/replay_service_test.go new file mode 100644 index 0000000000..9fee3db3bf --- /dev/null +++ 
b/core/scheduler/service/replay_service_test.go @@ -0,0 +1,266 @@ +package service_test + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/scheduler/service" + "github.com/odpf/optimus/core/tenant" + "github.com/odpf/optimus/internal/lib/cron" +) + +func TestReplayService(t *testing.T) { + ctx := context.Background() + projName := tenant.ProjectName("proj") + namespaceName := tenant.ProjectName("ns1") + jobName := scheduler.JobName("sample_select") + startTimeStr := "2023-01-02T15:00:00Z" + startTime, _ := time.Parse(scheduler.ISODateFormat, startTimeStr) + endTime := startTime.Add(48 * time.Hour) + tnnt, _ := tenant.NewTenant(projName.String(), namespaceName.String()) + parallel := true + description := "sample backfill" + replayJobConfig := map[string]string{"EXECUTION_PROJECT": "example_project"} + replayConfig := scheduler.NewReplayConfig(startTime, endTime, parallel, replayJobConfig, description) + replayID := uuid.New() + job := scheduler.Job{ + Name: jobName, + Tenant: tnnt, + } + jobWithDetails := &scheduler.JobWithDetails{ + Job: &job, + JobMetadata: &scheduler.JobMetadata{ + Version: 1, + }, + Schedule: &scheduler.Schedule{ + StartDate: startTime.Add(-time.Hour * 24), + Interval: "0 12 * * *", + }, + } + jobCronStr := "0 12 * * *" + jobCron, _ := cron.ParseCronSchedule(jobCronStr) + + t.Run("CreateReplay", func(t *testing.T) { + t.Run("should return replay ID if replay created successfully", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayValidator := new(ReplayValidator) + defer replayValidator.AssertExpectations(t) + + scheduledTime1Str := "2023-01-03T12:00:00Z" + scheduledTime1, _ := time.Parse(scheduler.ISODateFormat, scheduledTime1Str) + scheduledTime2 := scheduledTime1.Add(24 * time.Hour) + replayRuns := []*scheduler.JobRunStatus{ + {ScheduledAt: scheduledTime1, State: scheduler.StatePending}, + {ScheduledAt: scheduledTime2, State: scheduler.StatePending}, + } + replayReq := scheduler.NewReplayRequest(jobName, tnnt, replayConfig, scheduler.ReplayStateCreated) + + jobRepository.On("GetJobDetails", ctx, projName, jobName).Return(jobWithDetails, nil) + replayValidator.On("Validate", ctx, replayReq, jobCron).Return(nil) + replayRepository.On("RegisterReplay", ctx, replayReq, replayRuns).Return(replayID, nil) + + replayService := service.NewReplayService(replayRepository, jobRepository, replayValidator) + result, err := replayService.CreateReplay(ctx, tnnt, jobName, replayConfig) + assert.NoError(t, err) + assert.Equal(t, replayID, result) + }) + + t.Run("should return error if not pass validation", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayValidator := new(ReplayValidator) + defer replayValidator.AssertExpectations(t) + + replayReq := scheduler.NewReplayRequest(jobName, tnnt, replayConfig, scheduler.ReplayStateCreated) + + jobRepository.On("GetJobDetails", ctx, projName, jobName).Return(jobWithDetails, nil) + replayValidator.On("Validate", ctx, replayReq, jobCron).Return(errors.New("not passed validation")) + + replayService := service.NewReplayService(replayRepository, 
jobRepository, replayValidator) + result, err := replayService.CreateReplay(ctx, tnnt, jobName, replayConfig) + assert.ErrorContains(t, err, "not passed validation") + assert.Equal(t, uuid.Nil, result) + }) + + t.Run("should return error if unable to get job details", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayValidator := new(ReplayValidator) + defer replayValidator.AssertExpectations(t) + + internalErr := errors.New("internal error") + jobRepository.On("GetJobDetails", ctx, projName, jobName).Return(nil, internalErr) + + replayService := service.NewReplayService(replayRepository, jobRepository, replayValidator) + result, err := replayService.CreateReplay(ctx, tnnt, jobName, replayConfig) + assert.ErrorIs(t, err, internalErr) + assert.Equal(t, uuid.Nil, result) + }) + }) +} + +// ReplayRepository is an autogenerated mock type for the ReplayRepository type +type ReplayRepository struct { + mock.Mock +} + +// GetReplayRequestsByStatus provides a mock function with given fields: ctx, statusList +func (_m *ReplayRepository) GetReplayRequestsByStatus(ctx context.Context, statusList []scheduler.ReplayState) ([]*scheduler.Replay, error) { + ret := _m.Called(ctx, statusList) + + var r0 []*scheduler.Replay + if rf, ok := ret.Get(0).(func(context.Context, []scheduler.ReplayState) []*scheduler.Replay); ok { + r0 = rf(ctx, statusList) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*scheduler.Replay) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []scheduler.ReplayState) error); ok { + r1 = rf(ctx, statusList) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetReplayToExecute provides a mock function with given fields: _a0 +func (_m *ReplayRepository) GetReplayToExecute(_a0 context.Context) (*scheduler.ReplayWithRun, error) { + ret := _m.Called(_a0) + + var r0 *scheduler.ReplayWithRun + if rf, ok := ret.Get(0).(func(context.Context) *scheduler.ReplayWithRun); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*scheduler.ReplayWithRun) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RegisterReplay provides a mock function with given fields: ctx, replay, runs +func (_m *ReplayRepository) RegisterReplay(ctx context.Context, replay *scheduler.Replay, runs []*scheduler.JobRunStatus) (uuid.UUID, error) { + ret := _m.Called(ctx, replay, runs) + + var r0 uuid.UUID + if rf, ok := ret.Get(0).(func(context.Context, *scheduler.Replay, []*scheduler.JobRunStatus) uuid.UUID); ok { + r0 = rf(ctx, replay, runs) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(uuid.UUID) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *scheduler.Replay, []*scheduler.JobRunStatus) error); ok { + r1 = rf(ctx, replay, runs) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateReplay provides a mock function with given fields: ctx, replayID, state, runs, message +func (_m *ReplayRepository) UpdateReplay(ctx context.Context, replayID uuid.UUID, state scheduler.ReplayState, runs []*scheduler.JobRunStatus, message string) error { + ret := _m.Called(ctx, replayID, state, runs, message) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uuid.UUID, scheduler.ReplayState, []*scheduler.JobRunStatus, string) error); ok { + r0 = rf(ctx, 
replayID, state, runs, message) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateReplayStatus provides a mock function with given fields: ctx, replayID, state, message +func (_m *ReplayRepository) UpdateReplayStatus(ctx context.Context, replayID uuid.UUID, state scheduler.ReplayState, message string) error { + ret := _m.Called(ctx, replayID, state, message) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uuid.UUID, scheduler.ReplayState, string) error); ok { + r0 = rf(ctx, replayID, state, message) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetReplayJobConfig provides a mock function with given fields: ctx, jobTenant, jobName, scheduledAt +func (_m *ReplayRepository) GetReplayJobConfig(ctx context.Context, jobTenant tenant.Tenant, jobName scheduler.JobName, scheduledAt time.Time) (map[string]string, error) { + ret := _m.Called(ctx, jobTenant, jobName, scheduledAt) + + var r0 map[string]string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, tenant.Tenant, scheduler.JobName, time.Time) (map[string]string, error)); ok { + return rf(ctx, jobTenant, jobName, scheduledAt) + } + if rf, ok := ret.Get(0).(func(context.Context, tenant.Tenant, scheduler.JobName, time.Time) map[string]string); ok { + r0 = rf(ctx, jobTenant, jobName, scheduledAt) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, tenant.Tenant, scheduler.JobName, time.Time) error); ok { + r1 = rf(ctx, jobTenant, jobName, scheduledAt) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReplayValidator is an autogenerated mock type for the ReplayValidator type +type ReplayValidator struct { + mock.Mock +} + +// Validate provides a mock function with given fields: ctx, replayRequest, jobCron +func (_m *ReplayValidator) Validate(ctx context.Context, replayRequest *scheduler.Replay, jobCron *cron.ScheduleSpec) error { + ret := _m.Called(ctx, replayRequest, jobCron) + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *scheduler.Replay, *cron.ScheduleSpec) error); ok { + r0 = rf(ctx, replayRequest, jobCron) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/core/scheduler/service/replay_validator.go b/core/scheduler/service/replay_validator.go new file mode 100644 index 0000000000..721c3d8e19 --- /dev/null +++ b/core/scheduler/service/replay_validator.go @@ -0,0 +1,68 @@ +package service + +import ( + "golang.org/x/net/context" + + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/internal/errors" + "github.com/odpf/optimus/internal/lib/cron" +) + +var replayStatusToValidate = []scheduler.ReplayState{ + scheduler.ReplayStateCreated, scheduler.ReplayStateInProgress, + scheduler.ReplayStatePartialReplayed, scheduler.ReplayStateReplayed, +} + +type Validator struct { + replayRepository ReplayRepository + scheduler ReplayScheduler +} + +func NewValidator(replayRepository ReplayRepository, scheduler ReplayScheduler) *Validator { + return &Validator{replayRepository: replayRepository, scheduler: scheduler} +} + +func (v Validator) Validate(ctx context.Context, replayRequest *scheduler.Replay, jobCron *cron.ScheduleSpec) error { + if err := v.validateConflictedReplay(ctx, replayRequest); err != nil { + return err + } + + return v.validateConflictedRun(ctx, replayRequest, jobCron) +} + +func (v Validator) validateConflictedReplay(ctx context.Context, replayRequest *scheduler.Replay) error { + onGoingReplays, err := 
v.replayRepository.GetReplayRequestsByStatus(ctx, replayStatusToValidate) + if err != nil { + return err + } + for _, onGoingReplay := range onGoingReplays { + if onGoingReplay.Tenant() != replayRequest.Tenant() || onGoingReplay.JobName() != replayRequest.JobName() { + continue + } + + // Check any intersection of date range + if (onGoingReplay.Config().StartTime.Equal(replayRequest.Config().EndTime) || onGoingReplay.Config().StartTime.Before(replayRequest.Config().EndTime)) && + (onGoingReplay.Config().EndTime.Equal(replayRequest.Config().StartTime) || onGoingReplay.Config().EndTime.After(replayRequest.Config().StartTime)) { + return errors.NewError(errors.ErrFailedPrecond, scheduler.EntityJobRun, "conflicted replay found") + } + } + return nil +} + +func (v Validator) validateConflictedRun(ctx context.Context, replayRequest *scheduler.Replay, jobCron *cron.ScheduleSpec) error { + jobRunCriteria := &scheduler.JobRunsCriteria{ + Name: replayRequest.JobName().String(), + StartDate: replayRequest.Config().StartTime, + EndDate: replayRequest.Config().EndTime, + } + runs, err := v.scheduler.GetJobRuns(ctx, replayRequest.Tenant(), jobRunCriteria, jobCron) + if err != nil { + return err + } + for _, run := range runs { + if run.State == scheduler.StateQueued || run.State == scheduler.StateRunning { + return errors.NewError(errors.ErrFailedPrecond, scheduler.EntityJobRun, "conflicted job run found") + } + } + return nil +} diff --git a/core/scheduler/service/replay_validator_test.go b/core/scheduler/service/replay_validator_test.go new file mode 100644 index 0000000000..778cac1fe2 --- /dev/null +++ b/core/scheduler/service/replay_validator_test.go @@ -0,0 +1,148 @@ +package service_test + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/scheduler/service" + "github.com/odpf/optimus/core/tenant" + "github.com/odpf/optimus/internal/lib/cron" +) + +func TestReplayValidator(t *testing.T) { + ctx := context.Background() + tnnt, _ := tenant.NewTenant("sample-project", "sample-namespace") + jobName := scheduler.JobName("sample_select") + startTimeStr := "2023-01-02T15:00:00Z" + startTime, _ := time.Parse(scheduler.ISODateFormat, startTimeStr) + endTime := startTime.Add(48 * time.Hour) + parallel := true + description := "sample backfill" + replayJobConfig := map[string]string{"EXECUTION_PROJECT": "example_project"} + replayConfig := scheduler.NewReplayConfig(startTime, endTime, parallel, replayJobConfig, description) + runsCriteriaJobA := &scheduler.JobRunsCriteria{ + Name: jobName.String(), + StartDate: startTime, + EndDate: endTime, + } + jobCronStr := "0 12 * * *" + jobCron, _ := cron.ParseCronSchedule(jobCronStr) + scheduledTimeStr1 := "2023-01-02T12:00:00Z" + scheduledTime1, _ := time.Parse(scheduler.ISODateFormat, scheduledTimeStr1) + replayStatusToValidate := []scheduler.ReplayState{ + scheduler.ReplayStateCreated, scheduler.ReplayStateInProgress, + scheduler.ReplayStatePartialReplayed, scheduler.ReplayStateReplayed, + } + replayReq := scheduler.NewReplayRequest(jobName, tnnt, replayConfig, scheduler.ReplayStateCreated) + + t.Run("Validate", func(t *testing.T) { + t.Run("should return nil if no conflict replay or conflict run found", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + onGoingReplayConfig := scheduler.NewReplayConfig(time.Now(), 
time.Now(), parallel, replayJobConfig, description) + onGoingReplay := []*scheduler.Replay{ + scheduler.NewReplayRequest(jobName, tnnt, onGoingReplayConfig, scheduler.ReplayStateCreated), + scheduler.NewReplayRequest("other-job", tnnt, onGoingReplayConfig, scheduler.ReplayStateCreated), + } + currentRuns := []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateSuccess, + }, + } + + replayRepository.On("GetReplayRequestsByStatus", ctx, replayStatusToValidate).Return(onGoingReplay, nil) + sch.On("GetJobRuns", ctx, tnnt, runsCriteriaJobA, jobCron).Return(currentRuns, nil) + + validator := service.NewValidator(replayRepository, sch) + err := validator.Validate(ctx, replayReq, jobCron) + assert.NoError(t, err) + }) + t.Run("should return error if conflict replay found", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + onGoingReplay := []*scheduler.Replay{ + scheduler.NewReplayRequest(jobName, tnnt, replayConfig, scheduler.ReplayStateInProgress), + } + + replayRepository.On("GetReplayRequestsByStatus", ctx, replayStatusToValidate).Return(onGoingReplay, nil) + + validator := service.NewValidator(replayRepository, sch) + err := validator.Validate(ctx, replayReq, jobCron) + assert.ErrorContains(t, err, "conflicted replay") + }) + t.Run("should return error if conflict run found", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + onGoingReplayConfig := scheduler.NewReplayConfig(time.Now(), time.Now(), parallel, replayJobConfig, description) + onGoingReplay := []*scheduler.Replay{ + scheduler.NewReplayRequest(jobName, tnnt, onGoingReplayConfig, scheduler.ReplayStateCreated), + } + currentRuns := []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateRunning, + }, + } + + replayRepository.On("GetReplayRequestsByStatus", ctx, replayStatusToValidate).Return(onGoingReplay, nil) + sch.On("GetJobRuns", ctx, tnnt, runsCriteriaJobA, jobCron).Return(currentRuns, nil) + + validator := service.NewValidator(replayRepository, sch) + err := validator.Validate(ctx, replayReq, jobCron) + assert.ErrorContains(t, err, "conflicted job run found") + }) + t.Run("should return error if unable to GetReplayRequestsByStatus", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + internalErr := errors.New("internal error") + replayRepository.On("GetReplayRequestsByStatus", ctx, replayStatusToValidate).Return(nil, internalErr) + + validator := service.NewValidator(replayRepository, sch) + err := validator.Validate(ctx, replayReq, jobCron) + assert.ErrorIs(t, err, internalErr) + }) + t.Run("should return error if unable to get job runs", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + onGoingReplayConfig := scheduler.NewReplayConfig(time.Now(), time.Now(), parallel, map[string]string{}, description) + onGoingReplay := []*scheduler.Replay{ + scheduler.NewReplayRequest(jobName, tnnt, onGoingReplayConfig, scheduler.ReplayStateCreated), + } + + replayRepository.On("GetReplayRequestsByStatus", ctx, 
replayStatusToValidate).Return(onGoingReplay, nil) + + internalErr := errors.New("internal error") + sch.On("GetJobRuns", ctx, tnnt, runsCriteriaJobA, jobCron).Return(nil, internalErr) + + validator := service.NewValidator(replayRepository, sch) + err := validator.Validate(ctx, replayReq, jobCron) + assert.ErrorIs(t, err, internalErr) + }) + }) +} diff --git a/core/scheduler/service/replay_worker.go b/core/scheduler/service/replay_worker.go new file mode 100644 index 0000000000..aabf4e550b --- /dev/null +++ b/core/scheduler/service/replay_worker.go @@ -0,0 +1,234 @@ +package service + +import ( + "fmt" + "time" + + "github.com/google/uuid" + "github.com/odpf/salt/log" + "golang.org/x/net/context" + + "github.com/odpf/optimus/config" + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/tenant" + "github.com/odpf/optimus/internal/lib/cron" +) + +type ReplayScheduler interface { + Clear(ctx context.Context, t tenant.Tenant, jobName scheduler.JobName, scheduledAt time.Time) error + ClearBatch(ctx context.Context, t tenant.Tenant, jobName scheduler.JobName, startTime, endTime time.Time) error + + GetJobRuns(ctx context.Context, t tenant.Tenant, criteria *scheduler.JobRunsCriteria, jobCron *cron.ScheduleSpec) ([]*scheduler.JobRunStatus, error) +} + +type ReplayWorker struct { + l log.Logger + + replayRepo ReplayRepository + scheduler ReplayScheduler + + jobRepo JobRepository + + config config.ReplayConfig +} + +func NewReplayWorker(l log.Logger, replayRepo ReplayRepository, scheduler ReplayScheduler, jobRepo JobRepository, config config.ReplayConfig) *ReplayWorker { + return &ReplayWorker{l: l, replayRepo: replayRepo, scheduler: scheduler, jobRepo: jobRepo, config: config} +} + +type JobReplayRunService interface { + GetJobRuns(ctx context.Context, projectName tenant.ProjectName, jobName scheduler.JobName, criteria *scheduler.JobRunsCriteria) ([]*scheduler.JobRunStatus, error) +} + +func (w ReplayWorker) Process(replayReq *scheduler.ReplayWithRun) { + ctx := context.Background() + + w.l.Debug("processing replay request %s with status %s", replayReq.Replay.ID().String(), replayReq.Replay.State().String()) + jobCron, err := w.getJobCron(ctx, replayReq) + if err != nil { + w.l.Error(fmt.Sprintf("unable to get cron value for job %s: %s", replayReq.Replay.JobName(), err.Error()), "replay_id", replayReq.Replay.ID()) + w.updateReplayAsFailed(ctx, replayReq.Replay.ID(), err.Error()) + return + } + + switch replayReq.Replay.State() { + case scheduler.ReplayStateCreated: + err = w.processNewReplayRequest(ctx, replayReq, jobCron) + case scheduler.ReplayStatePartialReplayed: + err = w.processPartialReplayedRequest(ctx, replayReq, jobCron) + case scheduler.ReplayStateReplayed: + err = w.processReplayedRequest(ctx, replayReq, jobCron) + } + + if err != nil { + w.updateReplayAsFailed(ctx, replayReq.Replay.ID(), err.Error()) + } +} + +func (w ReplayWorker) processNewReplayRequest(ctx context.Context, replayReq *scheduler.ReplayWithRun, jobCron *cron.ScheduleSpec) (err error) { + state := scheduler.ReplayStateReplayed + var updatedRuns []*scheduler.JobRunStatus + if replayReq.Replay.Config().Parallel { + updatedRuns, err = w.processNewReplayRequestParallel(ctx, replayReq, jobCron) + } else { + updatedRuns, err = w.processNewReplayRequestSequential(ctx, replayReq, jobCron) + if len(replayReq.Runs) > 1 { + state = scheduler.ReplayStatePartialReplayed + } + } + if err != nil { + return err + } + if err := w.replayRepo.UpdateReplay(ctx, replayReq.Replay.ID(), state, updatedRuns, ""); err != nil 
{ + w.l.Error("unable to update replay state", "replay_id", replayReq.Replay.ID()) + return err + } + return nil +} + +func (w ReplayWorker) processNewReplayRequestParallel(ctx context.Context, replayReq *scheduler.ReplayWithRun, jobCron *cron.ScheduleSpec) ([]*scheduler.JobRunStatus, error) { + startLogicalTime := replayReq.GetFirstExecutableRun().GetLogicalTime(jobCron) + endLogicalTime := replayReq.GetLastExecutableRun().GetLogicalTime(jobCron) + if err := w.scheduler.ClearBatch(ctx, replayReq.Replay.Tenant(), replayReq.Replay.JobName(), startLogicalTime, endLogicalTime); err != nil { + w.l.Error("unable to clear job run for replay", "replay_id", replayReq.Replay.ID()) + return nil, err + } + + w.l.Info("cleared [%s] runs for replay [%s]", replayReq.Replay.JobName().String(), replayReq.Replay.ID().String()) + + var updatedRuns []*scheduler.JobRunStatus + for _, run := range replayReq.Runs { + updatedRuns = append(updatedRuns, &scheduler.JobRunStatus{ScheduledAt: run.ScheduledAt, State: scheduler.StateReplayed}) + } + return updatedRuns, nil +} + +func (w ReplayWorker) processNewReplayRequestSequential(ctx context.Context, replayReq *scheduler.ReplayWithRun, jobCron *cron.ScheduleSpec) ([]*scheduler.JobRunStatus, error) { + runToClear := replayReq.GetFirstExecutableRun() + if err := w.scheduler.Clear(ctx, replayReq.Replay.Tenant(), replayReq.Replay.JobName(), runToClear.GetLogicalTime(jobCron)); err != nil { + w.l.Error("unable to clear job run for replay", "replay_id", replayReq.Replay.ID()) + return nil, err + } + + w.l.Info("cleared [%s] [%s] run for replay %s", replayReq.Replay.JobName().String(), runToClear.ScheduledAt, replayReq.Replay.ID().String()) + updatedReplayMap := map[time.Time]scheduler.State{ + runToClear.ScheduledAt: scheduler.StateReplayed, + } + updatedRuns := scheduler.JobRunStatusList(replayReq.Runs).MergeWithUpdatedRuns(updatedReplayMap) + return updatedRuns, nil +} + +func (w ReplayWorker) processPartialReplayedRequest(ctx context.Context, replayReq *scheduler.ReplayWithRun, jobCron *cron.ScheduleSpec) error { + incomingRuns, err := w.fetchRuns(ctx, replayReq, jobCron) + if err != nil { + w.l.Error(fmt.Sprintf("unable to get runs: %s", err.Error()), "replay_id", replayReq.Replay.ID()) + return err + } + + updatedReplayMap := identifyUpdatedRunStatus(replayReq.Runs, incomingRuns) + updatedRuns := scheduler.JobRunStatusList(replayReq.Runs).MergeWithUpdatedRuns(updatedReplayMap) + + replayedRuns := scheduler.JobRunStatusList(updatedRuns).GetSortedRunsByStates([]scheduler.State{scheduler.StateReplayed}) + toBeReplayedRuns := scheduler.JobRunStatusList(updatedRuns).GetSortedRunsByStates([]scheduler.State{scheduler.StatePending}) + + replayState := scheduler.ReplayStatePartialReplayed + if len(replayedRuns) == 0 && len(toBeReplayedRuns) > 0 { + logicalTimeToClear := toBeReplayedRuns[0].GetLogicalTime(jobCron) + if err := w.scheduler.Clear(ctx, replayReq.Replay.Tenant(), replayReq.Replay.JobName(), logicalTimeToClear); err != nil { + w.l.Error("unable to clear job run for replay", "replay_id", replayReq.Replay.ID()) + return err + } + w.l.Info("cleared [%s] [%s] run for replay %s", replayReq.Replay.JobName().String(), toBeReplayedRuns[0].ScheduledAt, replayReq.Replay.ID().String()) + + updatedReplayMap[toBeReplayedRuns[0].ScheduledAt] = scheduler.StateReplayed + updatedRuns = scheduler.JobRunStatusList(incomingRuns).MergeWithUpdatedRuns(updatedReplayMap) + } + + pendingRuns := 
scheduler.JobRunStatusList(updatedRuns).GetSortedRunsByStates([]scheduler.State{scheduler.StatePending}) + if len(pendingRuns) == 0 { + replayState = scheduler.ReplayStateReplayed + } + + if err := w.replayRepo.UpdateReplay(ctx, replayReq.Replay.ID(), replayState, updatedRuns, ""); err != nil { + w.l.Error("unable to update replay state", "replay_id", replayReq.Replay.ID()) + return err + } + return nil +} + +func (w ReplayWorker) processReplayedRequest(ctx context.Context, replayReq *scheduler.ReplayWithRun, jobCron *cron.ScheduleSpec) error { + incomingRuns, err := w.fetchRuns(ctx, replayReq, jobCron) + if err != nil { + w.l.Error(fmt.Sprintf("unable to get runs: %s", err.Error()), "replay_id", replayReq.Replay.ID()) + return err + } + + updatedReplayMap := identifyUpdatedRunStatus(replayReq.Runs, incomingRuns) + updatedRuns := scheduler.JobRunStatusList(incomingRuns).MergeWithUpdatedRuns(updatedReplayMap) + inProgressRuns := scheduler.JobRunStatusList(updatedRuns).GetSortedRunsByStates([]scheduler.State{scheduler.StateReplayed}) + failedRuns := scheduler.JobRunStatusList(updatedRuns).GetSortedRunsByStates([]scheduler.State{scheduler.StateFailed}) + + var message string + state := scheduler.ReplayStateReplayed + if len(inProgressRuns) == 0 && len(failedRuns) == 0 { + state = scheduler.ReplayStateSuccess + w.l.Info("marking replay %s as success", replayReq.Replay.ID().String()) + } else if len(inProgressRuns) == 0 && len(failedRuns) > 0 { + state = scheduler.ReplayStateFailed + message = fmt.Sprintf("found %d failed runs.", len(failedRuns)) + w.l.Info("marking replay %s as failed", replayReq.Replay.ID().String()) + } + + if err := w.replayRepo.UpdateReplay(ctx, replayReq.Replay.ID(), state, updatedRuns, message); err != nil { + w.l.Error("unable to update replay state", "replay_id", replayReq.Replay.ID()) + return err + } + return nil +} + +func identifyUpdatedRunStatus(existingJobRuns, incomingJobRuns []*scheduler.JobRunStatus) map[time.Time]scheduler.State { + incomingRunStatusMap := scheduler.JobRunStatusList(incomingJobRuns).ToRunStatusMap() + + updatedReplayMap := make(map[time.Time]scheduler.State) + for _, run := range existingJobRuns { + if run.State != scheduler.StateReplayed { + continue + } + if incomingRunStatusMap[run.ScheduledAt.UTC()] == scheduler.StateSuccess || incomingRunStatusMap[run.ScheduledAt.UTC()] == scheduler.StateFailed { + updatedReplayMap[run.ScheduledAt.UTC()] = incomingRunStatusMap[run.ScheduledAt.UTC()] + } + } + return updatedReplayMap +} + +func (w ReplayWorker) getJobCron(ctx context.Context, replayReq *scheduler.ReplayWithRun) (*cron.ScheduleSpec, error) { + jobWithDetails, err := w.jobRepo.GetJobDetails(ctx, replayReq.Replay.Tenant().ProjectName(), replayReq.Replay.JobName()) + if err != nil || jobWithDetails == nil { + return nil, fmt.Errorf("unable to get job details from DB for jobName: %s, project: %s, error: %w ", + replayReq.Replay.JobName(), replayReq.Replay.Tenant().ProjectName(), err) + } + interval := jobWithDetails.Schedule.Interval + if interval == "" { + return nil, fmt.Errorf("job schedule interval not found") + } + jobCron, err := cron.ParseCronSchedule(interval) + if err != nil { + return nil, fmt.Errorf("unable to parse job cron interval %w", err) + } + return jobCron, nil +} + +func (w ReplayWorker) fetchRuns(ctx context.Context, replayReq *scheduler.ReplayWithRun, jobCron *cron.ScheduleSpec) ([]*scheduler.JobRunStatus, error) { + jobRunCriteria := &scheduler.JobRunsCriteria{ + Name: replayReq.Replay.JobName().String(), + StartDate: 
replayReq.Replay.Config().StartTime, + EndDate: replayReq.Replay.Config().EndTime, + } + return w.scheduler.GetJobRuns(ctx, replayReq.Replay.Tenant(), jobRunCriteria, jobCron) +} + +func (w ReplayWorker) updateReplayAsFailed(ctx context.Context, replayID uuid.UUID, message string) { + if err := w.replayRepo.UpdateReplayStatus(ctx, replayID, scheduler.ReplayStateFailed, message); err != nil { + w.l.Error("unable to update replay state to failed", "replay_id", replayID) + } +} diff --git a/core/scheduler/service/replay_worker_test.go b/core/scheduler/service/replay_worker_test.go new file mode 100644 index 0000000000..bacd7f8b03 --- /dev/null +++ b/core/scheduler/service/replay_worker_test.go @@ -0,0 +1,554 @@ +package service_test + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/google/uuid" + "github.com/odpf/salt/log" + "github.com/stretchr/testify/mock" + + "github.com/odpf/optimus/config" + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/scheduler/service" + "github.com/odpf/optimus/core/tenant" + "github.com/odpf/optimus/internal/lib/cron" +) + +func TestReplayWorker(t *testing.T) { + logger := log.NewNoop() + jobAName, _ := scheduler.JobNameFrom("job-a") + projName := tenant.ProjectName("proj") + namespaceName := tenant.ProjectName("ns1") + tnnt, _ := tenant.NewTenant(projName.String(), namespaceName.String()) + startTimeStr := "2023-01-02T00:00:00Z" + startTime, _ := time.Parse(scheduler.ISODateFormat, startTimeStr) + endTime := startTime.Add(48 * time.Hour) + replayDescription := "sample backfill" + scheduledTimeStr1 := "2023-01-02T12:00:00Z" + scheduledTime1, _ := time.Parse(scheduler.ISODateFormat, scheduledTimeStr1) + runsCriteriaJobA := &scheduler.JobRunsCriteria{ + Name: jobAName.String(), + StartDate: startTime, + EndDate: endTime, + } + scheduledTime2 := scheduledTime1.Add(24 * time.Hour) + executionTime1 := scheduledTime1.Add(-24 * time.Hour) + executionTime2 := executionTime1.Add(24 * time.Hour) + jobCronStr := "0 12 * * *" + jobA := scheduler.Job{ + Name: jobAName, + Tenant: tnnt, + } + jobAWithDetails := &scheduler.JobWithDetails{ + Job: &jobA, + JobMetadata: &scheduler.JobMetadata{ + Version: 1, + }, + Schedule: &scheduler.Schedule{ + StartDate: startTime.Add(-time.Hour * 24), + Interval: jobCronStr, + }, + } + jobCron, _ := cron.ParseCronSchedule(jobCronStr) + replayJobConfig := map[string]string{"EXECUTION_PROJECT": "example_project"} + replayConfig := scheduler.NewReplayConfig(startTime, endTime, false, replayJobConfig, replayDescription) + replayConfigParallel := scheduler.NewReplayConfig(startTime, endTime, true, replayJobConfig, replayDescription) + replayServerConfig := config.ReplayConfig{} + internalErr := errors.New("internal error") + + t.Run("Process", func(t *testing.T) { + t.Run("should be able to process new sequential replay request with single run", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfig, scheduler.ReplayStateCreated, time.Now()), + Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StatePending, + }, + }, + } + updatedRuns := []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateReplayed, + }, + } +
jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(jobAWithDetails, nil) + sch.On("Clear", mock.Anything, tnnt, jobAName, scheduledTime1.Add(-24*time.Hour)).Return(nil) + replayRepository.On("UpdateReplay", mock.Anything, replayReq.Replay.ID(), scheduler.ReplayStateReplayed, updatedRuns, "").Return(nil) + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + t.Run("should be able to process new sequential replay request with multiple runs", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfig, scheduler.ReplayStateCreated, time.Now()), + Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StatePending, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StatePending, + }, + }, + } + updatedRuns := []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateReplayed, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StatePending, + }, + } + + jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(jobAWithDetails, nil) + sch.On("Clear", mock.Anything, tnnt, jobAName, scheduledTime1.Add(-24*time.Hour)).Return(nil) + replayRepository.On("UpdateReplay", mock.Anything, replayReq.Replay.ID(), scheduler.ReplayStatePartialReplayed, updatedRuns, "").Return(nil) + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + t.Run("should be able to process new parallel replay request", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfigParallel, scheduler.ReplayStateCreated, time.Now()), + Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StatePending, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StatePending, + }, + }, + } + + jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(jobAWithDetails, nil) + sch.On("ClearBatch", mock.Anything, tnnt, jobAName, executionTime1, executionTime2).Return(nil) + replayRepository.On("UpdateReplay", mock.Anything, replayReq.Replay.ID(), scheduler.ReplayStateReplayed, mock.Anything, "").Return(nil) + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + + t.Run("should be able to update replay state as failed if unable to get job details", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfigParallel, scheduler.ReplayStateCreated, time.Now()), +
Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StatePending, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StatePending, + }, + }, + } + + jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(nil, internalErr) + replayRepository.On("UpdateReplayStatus", mock.Anything, replayReq.Replay.ID(), scheduler.ReplayStateFailed, mock.Anything).Return(nil) + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + t.Run("should be able to update replay state as failed if unable to clear batch of runs", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfigParallel, scheduler.ReplayStateCreated, time.Now()), + Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StatePending, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StatePending, + }, + }, + } + + jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(jobAWithDetails, nil) + sch.On("ClearBatch", mock.Anything, tnnt, jobAName, executionTime1, executionTime2).Return(internalErr) + replayRepository.On("UpdateReplayStatus", mock.Anything, replayReq.Replay.ID(), scheduler.ReplayStateFailed, mock.Anything).Return(nil) + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + t.Run("should be able to update replay state as failed if unable to clear run", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfig, scheduler.ReplayStateCreated, time.Now()), + Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StatePending, + }, + }, + } + + jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(jobAWithDetails, nil) + sch.On("Clear", mock.Anything, tnnt, jobAName, scheduledTime1.Add(-24*time.Hour)).Return(internalErr) + replayRepository.On("UpdateReplayStatus", mock.Anything, replayReq.Replay.ID(), scheduler.ReplayStateFailed, mock.Anything).Return(nil) + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + + t.Run("should be able to process partial replayed request", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfigParallel, scheduler.ReplayStatePartialReplayed, time.Now()), + Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateReplayed, + }, + { + ScheduledAt: scheduledTime2, + State:
scheduler.StatePending, + }, + }, + } + updatedRuns1 := []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateSuccess, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StatePending, + }, + } + updatedRuns2 := []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateSuccess, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StateReplayed, + }, + } + + jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(jobAWithDetails, nil) + sch.On("GetJobRuns", mock.Anything, tnnt, runsCriteriaJobA, jobCron).Return(updatedRuns1, nil).Once() + sch.On("Clear", mock.Anything, tnnt, jobAName, scheduledTime2.Add(-24*time.Hour)).Return(nil) + replayRepository.On("UpdateReplay", mock.Anything, replayReq.Replay.ID(), scheduler.ReplayStateReplayed, updatedRuns2, "").Return(nil).Once() + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + t.Run("should be able to update replay state as failed if unable to fetch job runs", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfigParallel, scheduler.ReplayStatePartialReplayed, time.Now()), + Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateReplayed, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StatePending, + }, + }, + } + + jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(jobAWithDetails, nil) + sch.On("GetJobRuns", mock.Anything, tnnt, runsCriteriaJobA, jobCron).Return(nil, internalErr).Once() + replayRepository.On("UpdateReplayStatus", mock.Anything, replayReq.Replay.ID(), scheduler.ReplayStateFailed, mock.Anything).Return(nil) + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + t.Run("should be able to update replay state as failed if unable to clear run when processing partial replayed request", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfigParallel, scheduler.ReplayStatePartialReplayed, time.Now()), + Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateReplayed, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StatePending, + }, + }, + } + updatedRuns1 := []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateSuccess, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StatePending, + }, + } + + jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(jobAWithDetails, nil) + sch.On("GetJobRuns", mock.Anything, tnnt, runsCriteriaJobA, jobCron).Return(updatedRuns1, nil).Once() + sch.On("Clear", mock.Anything, tnnt, jobAName, scheduledTime2.Add(-24*time.Hour)).Return(internalErr) + replayRepository.On("UpdateReplayStatus", mock.Anything,
replayReq.Replay.ID(), scheduler.ReplayStateFailed, mock.Anything).Return(nil) + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + + t.Run("should be able to process replayed request", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfigParallel, scheduler.ReplayStateReplayed, time.Now()), + Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateSuccess, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StateReplayed, + }, + }, + } + updatedRuns := []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateSuccess, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StateSuccess, + }, + } + + jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(jobAWithDetails, nil) + sch.On("GetJobRuns", mock.Anything, tnnt, runsCriteriaJobA, jobCron).Return(updatedRuns, nil) + replayRepository.On("UpdateReplay", mock.Anything, replayReq.Replay.ID(), scheduler.ReplayStateSuccess, updatedRuns, "").Return(nil) + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + t.Run("should be able to update replay state as failed if unable to fetch runs when processing replayed request", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfigParallel, scheduler.ReplayStateReplayed, time.Now()), + Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateSuccess, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StateReplayed, + }, + }, + } + + jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(jobAWithDetails, nil) + sch.On("GetJobRuns", mock.Anything, tnnt, runsCriteriaJobA, jobCron).Return(nil, internalErr) + replayRepository.On("UpdateReplayStatus", mock.Anything, replayReq.Replay.ID(), scheduler.ReplayStateFailed, mock.Anything).Return(nil) + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + t.Run("should be able to update replay state as failed if all runs finished and a failure is found", func(t *testing.T) { + replayRepository := new(ReplayRepository) + defer replayRepository.AssertExpectations(t) + + sch := new(mockReplayScheduler) + defer sch.AssertExpectations(t) + + jobRepository := new(JobRepository) + defer jobRepository.AssertExpectations(t) + + replayReq := &scheduler.ReplayWithRun{ + Replay: scheduler.NewReplay(uuid.New(), jobAName, tnnt, replayConfigParallel, scheduler.ReplayStateReplayed, time.Now()), + Runs: []*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateSuccess, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StateReplayed, + }, + }, + } + updatedRuns :=
[]*scheduler.JobRunStatus{ + { + ScheduledAt: scheduledTime1, + State: scheduler.StateSuccess, + }, + { + ScheduledAt: scheduledTime2, + State: scheduler.StateFailed, + }, + } + + jobRepository.On("GetJobDetails", mock.Anything, projName, jobAName).Return(jobAWithDetails, nil) + sch.On("GetJobRuns", mock.Anything, tnnt, runsCriteriaJobA, jobCron).Return(updatedRuns, nil) + replayRepository.On("UpdateReplay", mock.Anything, replayReq.Replay.ID(), scheduler.ReplayStateFailed, updatedRuns, "found 1 failed runs.").Return(nil) + + replayWorker := service.NewReplayWorker(logger, replayRepository, sch, jobRepository, replayServerConfig) + replayWorker.Process(replayReq) + }) + }) +} + +// mockReplayScheduler is an autogenerated mock type for the mockReplayScheduler type +type mockReplayScheduler struct { + mock.Mock +} + +// Clear provides a mock function with given fields: ctx, t, jobName, scheduledAt +func (_m *mockReplayScheduler) Clear(ctx context.Context, t tenant.Tenant, jobName scheduler.JobName, scheduledAt time.Time) error { + ret := _m.Called(ctx, t, jobName, scheduledAt) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, tenant.Tenant, scheduler.JobName, time.Time) error); ok { + r0 = rf(ctx, t, jobName, scheduledAt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClearBatch provides a mock function with given fields: ctx, t, jobName, startTime, endTime +func (_m *mockReplayScheduler) ClearBatch(ctx context.Context, t tenant.Tenant, jobName scheduler.JobName, startTime, endTime time.Time) error { + ret := _m.Called(ctx, t, jobName, startTime, endTime) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, tenant.Tenant, scheduler.JobName, time.Time, time.Time) error); ok { + r0 = rf(ctx, t, jobName, startTime, endTime) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetJobRuns provides a mock function with given fields: ctx, t, criteria, jobCron +func (_m *mockReplayScheduler) GetJobRuns(ctx context.Context, t tenant.Tenant, criteria *scheduler.JobRunsCriteria, jobCron *cron.ScheduleSpec) ([]*scheduler.JobRunStatus, error) { + ret := _m.Called(ctx, t, criteria, jobCron) + + var r0 []*scheduler.JobRunStatus + if rf, ok := ret.Get(0).(func(context.Context, tenant.Tenant, *scheduler.JobRunsCriteria, *cron.ScheduleSpec) []*scheduler.JobRunStatus); ok { + r0 = rf(ctx, t, criteria, jobCron) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*scheduler.JobRunStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, tenant.Tenant, *scheduler.JobRunsCriteria, *cron.ScheduleSpec) error); ok { + r1 = rf(ctx, t, criteria, jobCron) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/core/scheduler/status.go b/core/scheduler/status.go index d0161a0561..8774d033bc 100644 --- a/core/scheduler/status.go +++ b/core/scheduler/status.go @@ -1,6 +1,7 @@ package scheduler import ( + "sort" "strings" "time" @@ -19,6 +20,9 @@ const ( StateSuccess State = "success" StateFailed State = "failed" + + // StateReplayed is a replay-specific state to identify a run has been replayed but not yet finished + StateReplayed State = "replayed" ) var TaskEndStates = []State{StateSuccess, StateFailed, StateRetry} @@ -41,6 +45,8 @@ func StateFromString(state string) (State, error) { return StateSuccess, nil case string(StateFailed): return StateFailed, nil + case string(StateReplayed): + return StateReplayed, nil default: return "", errors.InvalidArgument(EntityJobRun, "invalid state for run "+state) } @@ -67,6 +73,52 @@ func 
JobRunStatusFrom(scheduledAt time.Time, state string) (JobRunStatus, error) }, nil } +func (j JobRunStatus) GetLogicalTime(jobCron *cron.ScheduleSpec) time.Time { + return jobCron.Prev(j.ScheduledAt) +} + +type JobRunStatusList []*JobRunStatus + +func (j JobRunStatusList) GetSortedRunsByStates(states []State) []*JobRunStatus { + stateMap := make(map[State]bool, len(states)) + for _, state := range states { + stateMap[state] = true + } + + var result []*JobRunStatus + for _, run := range j { + if stateMap[run.State] { + result = append(result, run) + } + } + sort.Slice(result, func(i, j int) bool { + return result[i].ScheduledAt.Before(result[j].ScheduledAt) + }) + return result +} + +func (j JobRunStatusList) MergeWithUpdatedRuns(updatedRunMap map[time.Time]State) []*JobRunStatus { + var updatedRuns []*JobRunStatus + for _, run := range j { + if updatedStatus, ok := updatedRunMap[run.ScheduledAt.UTC()]; ok { + updatedRun := run + updatedRun.State = updatedStatus + updatedRuns = append(updatedRuns, updatedRun) + continue + } + updatedRuns = append(updatedRuns, run) + } + return updatedRuns +} + +func (j JobRunStatusList) ToRunStatusMap() map[time.Time]State { + runStatusMap := make(map[time.Time]State, len(j)) + for _, run := range j { + runStatusMap[run.ScheduledAt.UTC()] = run.State + } + return runStatusMap +} + // JobRunsCriteria represents the filter condition to get run status from scheduler type JobRunsCriteria struct { Name string diff --git a/core/scheduler/status_test.go b/core/scheduler/status_test.go index 0208cde41f..5d167e562f 100644 --- a/core/scheduler/status_test.go +++ b/core/scheduler/status_test.go @@ -98,6 +98,8 @@ func TestStatus(t *testing.T) { "SUCCESS": scheduler.StateSuccess, "failed": scheduler.StateFailed, "FAILED": scheduler.StateFailed, + "replayed": scheduler.StateReplayed, + "REPLAYED": scheduler.StateReplayed, } for input, expectedState := range expectationsMap { respState, err := scheduler.StateFromString(input) @@ -110,4 +112,119 @@ func TestStatus(t *testing.T) { assert.EqualError(t, err, "invalid argument for entity jobRun: invalid state for run unregisteredState") assert.Equal(t, scheduler.State(""), respState) }) + t.Run("GetLogicalTime", func(t *testing.T) { + time1 := time.Date(2023, 0o1, 1, 0, 0, 0, 0, time.UTC) + time2 := time.Date(2023, 0o1, 2, 0, 0, 0, 0, time.UTC) + schedule, err := cron.ParseCronSchedule("@midnight") + assert.Nil(t, err) + + jobRunStatus, err := scheduler.JobRunStatusFrom(time2, "running") + assert.Nil(t, err) + + logicalTime := jobRunStatus.GetLogicalTime(schedule) + assert.Equal(t, time1, logicalTime) + }) + t.Run("GetSortedRunsByStates", func(t *testing.T) { + time1 := time.Date(2023, 0o1, 1, 0, 0, 0, 0, time.UTC) + time2 := time.Date(2023, 0o1, 2, 0, 0, 0, 0, time.UTC) + time3 := time.Date(2023, 0o1, 3, 0, 0, 0, 0, time.UTC) + + jobRunStatusList := scheduler.JobRunStatusList([]*scheduler.JobRunStatus{ + { + ScheduledAt: time3, + State: scheduler.StateRunning, + }, + { + ScheduledAt: time1, + State: scheduler.StatePending, + }, + { + ScheduledAt: time2, + State: scheduler.StateRunning, + }, + }) + expectedRuns := []*scheduler.JobRunStatus{ + { + ScheduledAt: time2, + State: scheduler.StateRunning, + }, + { + ScheduledAt: time3, + State: scheduler.StateRunning, + }, + } + + runs := jobRunStatusList.GetSortedRunsByStates([]scheduler.State{scheduler.StateRunning}) + assert.Equal(t, expectedRuns, runs) + }) + t.Run("MergeWithUpdatedRuns", func(t *testing.T) { + time1 := time.Date(2023, 0o1, 1, 0, 0, 0, 0, time.UTC) + time2 := 
time.Date(2023, 0o1, 2, 0, 0, 0, 0, time.UTC) + time3 := time.Date(2023, 0o1, 3, 0, 0, 0, 0, time.UTC) + + jobRunStatusList := scheduler.JobRunStatusList([]*scheduler.JobRunStatus{ + { + ScheduledAt: time1, + State: scheduler.StatePending, + }, + { + ScheduledAt: time2, + State: scheduler.StateRunning, + }, + { + ScheduledAt: time3, + State: scheduler.StateRunning, + }, + }) + updatedRuns := map[time.Time]scheduler.State{ + time1: scheduler.StateSuccess, + time2: scheduler.StateSuccess, + } + expectedRuns := []*scheduler.JobRunStatus{ + { + ScheduledAt: time1, + State: scheduler.StateSuccess, + }, + { + ScheduledAt: time2, + State: scheduler.StateSuccess, + }, + { + ScheduledAt: time3, + State: scheduler.StateRunning, + }, + } + + mergedRuns := jobRunStatusList.MergeWithUpdatedRuns(updatedRuns) + + assert.Equal(t, expectedRuns, mergedRuns) + }) + t.Run("ToRunStatusMap", func(t *testing.T) { + time1 := time.Date(2023, 0o1, 1, 0, 0, 0, 0, time.UTC) + time2 := time.Date(2023, 0o1, 2, 0, 0, 0, 0, time.UTC) + time3 := time.Date(2023, 0o1, 3, 0, 0, 0, 0, time.UTC) + + jobRunStatusList := scheduler.JobRunStatusList([]*scheduler.JobRunStatus{ + { + ScheduledAt: time1, + State: scheduler.StatePending, + }, + { + ScheduledAt: time2, + State: scheduler.StateRunning, + }, + { + ScheduledAt: time3, + State: scheduler.StateRunning, + }, + }) + expectedMap := map[time.Time]scheduler.State{ + time1: scheduler.StatePending, + time2: scheduler.StateRunning, + time3: scheduler.StateRunning, + } + + runStatusMap := jobRunStatusList.ToRunStatusMap() + assert.EqualValues(t, expectedMap, runStatusMap) + }) } diff --git a/ext/scheduler/airflow/airflow.go b/ext/scheduler/airflow/airflow.go index 3c31a22799..d70af4273d 100644 --- a/ext/scheduler/airflow/airflow.go +++ b/ext/scheduler/airflow/airflow.go @@ -10,6 +10,7 @@ import ( "path" "path/filepath" "strings" + "time" "github.com/kushsharma/parallel" "github.com/odpf/salt/log" @@ -29,6 +30,7 @@ const ( EntityAirflow = "Airflow" dagStatusBatchURL = "api/v1/dags/~/dagRuns/list" + dagRunClearURL = "api/v1/dags/%s/clearTaskInstances" airflowDateFormat = "2006-01-02T15:04:05+00:00" schedulerHostKey = "SCHEDULER_HOST" @@ -304,6 +306,34 @@ func (s *Scheduler) getSchedulerAuth(ctx context.Context, tnnt tenant.Tenant) (S }, nil } +func (s *Scheduler) Clear(ctx context.Context, t tenant.Tenant, jobName scheduler.JobName, executionTime time.Time) error { + return s.ClearBatch(ctx, t, jobName, executionTime, executionTime) +} + +func (s *Scheduler) ClearBatch(ctx context.Context, tnnt tenant.Tenant, jobName scheduler.JobName, startExecutionTime, endExecutionTime time.Time) error { + spanCtx, span := startChildSpan(ctx, "Clear") + defer span.End() + + data := []byte(fmt.Sprintf(`{"start_date": %q, "end_date": %q, "dry_run": false, "reset_dag_runs": true, "only_failed": false}`, + startExecutionTime.UTC().Format(airflowDateFormat), + endExecutionTime.UTC().Format(airflowDateFormat))) + req := airflowRequest{ + URL: dagRunClearURL, + method: http.MethodPost, + param: jobName.String(), + body: data, + } + schdAuth, err := s.getSchedulerAuth(ctx, tnnt) + if err != nil { + return err + } + _, err = s.client.Invoke(spanCtx, req, schdAuth) + if err != nil { + return fmt.Errorf("failure reason for clearing airflow dag runs: %w", err) + } + return nil +} + func NewScheduler(l log.Logger, bucketFac BucketFactory, client Client, compiler DagCompiler, projectGetter ProjectGetter, secretGetter SecretGetter) *Scheduler { return &Scheduler{ l: l, diff --git 
a/internal/store/postgres/migrations/000051_replace_replay.down.sql b/internal/store/postgres/migrations/000051_replace_replay.down.sql new file mode 100644 index 0000000000..3a3b517690 --- /dev/null +++ b/internal/store/postgres/migrations/000051_replace_replay.down.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS replay_request; + +ALTER TABLE IF EXISTS replay_old + RENAME TO replay; diff --git a/internal/store/postgres/migrations/000051_replace_replay.up.sql b/internal/store/postgres/migrations/000051_replace_replay.up.sql new file mode 100644 index 0000000000..1d61f1b86f --- /dev/null +++ b/internal/store/postgres/migrations/000051_replace_replay.up.sql @@ -0,0 +1,27 @@ +ALTER TABLE IF EXISTS replay + RENAME TO replay_old; + +CREATE TABLE IF NOT EXISTS replay_request ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + + project_name VARCHAR NOT NULL, + namespace_name VARCHAR NOT NULL, + job_name VARCHAR NOT NULL, + + description VARCHAR, + parallel BOOLEAN, + + start_time TIMESTAMP WITH TIME ZONE NOT NULL, + end_time TIMESTAMP WITH TIME ZONE, + + job_config JSONB NOT NULL, + + status VARCHAR(30) NOT NULL, + message TEXT, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL +); + +CREATE INDEX IF NOT EXISTS replay_request_project_name_idx on replay_request(project_name); +CREATE INDEX IF NOT EXISTS replay_request_job_name_idx on replay_request(job_name); diff --git a/internal/store/postgres/migrations/000052_create_replay_run.down.sql b/internal/store/postgres/migrations/000052_create_replay_run.down.sql new file mode 100644 index 0000000000..a33552dcd7 --- /dev/null +++ b/internal/store/postgres/migrations/000052_create_replay_run.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS replay_run; \ No newline at end of file diff --git a/internal/store/postgres/migrations/000052_create_replay_run.up.sql b/internal/store/postgres/migrations/000052_create_replay_run.up.sql new file mode 100644 index 0000000000..bac72d5808 --- /dev/null +++ b/internal/store/postgres/migrations/000052_create_replay_run.up.sql @@ -0,0 +1,14 @@ +CREATE TABLE IF NOT EXISTS replay_run ( + replay_id UUID, + + scheduled_at TIMESTAMP WITH TIME ZONE NOT NULL, + status VARCHAR(30) NOT NULL, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL, + + CONSTRAINT replay_run_replay_id_fkey + FOREIGN KEY(replay_id) + REFERENCES replay_request(id) + ON DELETE CASCADE +); diff --git a/internal/store/postgres/scheduler/replay_repository.go b/internal/store/postgres/scheduler/replay_repository.go new file mode 100644 index 0000000000..764e7fba5e --- /dev/null +++ b/internal/store/postgres/scheduler/replay_repository.go @@ -0,0 +1,366 @@ +package scheduler + +import ( + "fmt" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "golang.org/x/net/context" + + "github.com/odpf/optimus/core/job" + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/tenant" + "github.com/odpf/optimus/internal/errors" +) + +const ( + replayColumnsToStore = `job_name, namespace_name, project_name, start_time, end_time, description, parallel, job_config, status, message` + replayColumns = `id, ` + replayColumnsToStore + `, created_at` + + replayRunColumns = `replay_id, scheduled_at, status` + replayRunDetailColumns = `id as replay_id, job_name, namespace_name, project_name, start_time, end_time, description, +parallel, job_config, r.status as replay_status, r.message as replay_message, scheduled_at, 
run.status as run_status, r.created_at as replay_created_at` + + updateReplayRequest = `UPDATE replay_request SET status = $1, message = $2, updated_at = NOW() WHERE id = $3` +) + +type ReplayRepository struct { + db *pgxpool.Pool +} + +type replayRequest struct { + ID uuid.UUID + + JobName string + NamespaceName string + ProjectName string + + StartTime time.Time + EndTime time.Time + Description string + Parallel bool + JobConfig map[string]string + + Status string + Message string + + CreatedAt time.Time + UpdatedAt time.Time +} + +func (r *replayRequest) toSchedulerReplayRequest() (*scheduler.Replay, error) { + tnnt, err := tenant.NewTenant(r.ProjectName, r.NamespaceName) + if err != nil { + return nil, err + } + conf := scheduler.NewReplayConfig(r.StartTime, r.EndTime, r.Parallel, r.JobConfig, r.Description) + replayStatus, err := scheduler.ReplayStateFromString(r.Status) + if err != nil { + return nil, err + } + jobName, err := scheduler.JobNameFrom(r.JobName) + if err != nil { + return nil, err + } + return scheduler.NewReplay(r.ID, jobName, tnnt, conf, replayStatus, r.CreatedAt), nil +} + +type replayRun struct { + ID uuid.UUID + + JobName string + NamespaceName string + ProjectName string + + StartTime time.Time + EndTime time.Time + Description string + Parallel bool + JobConfig map[string]string + + ReplayStatus string + Message string + + ScheduledTime time.Time + RunStatus string + + CreatedAt time.Time + UpdatedAt time.Time +} + +func (r *replayRun) toReplayRequest() (*scheduler.Replay, error) { + tnnt, err := tenant.NewTenant(r.ProjectName, r.NamespaceName) + if err != nil { + return nil, err + } + conf := scheduler.NewReplayConfig(r.StartTime, r.EndTime, r.Parallel, r.JobConfig, r.Description) + replayStatus, err := scheduler.ReplayStateFromString(r.ReplayStatus) + if err != nil { + return nil, err + } + jobName, err := scheduler.JobNameFrom(r.JobName) + if err != nil { + return nil, err + } + return scheduler.NewReplay(r.ID, jobName, tnnt, conf, replayStatus, r.CreatedAt), nil +} + +func (r *replayRun) toJobRunStatus() (*scheduler.JobRunStatus, error) { + runState, err := scheduler.StateFromString(r.RunStatus) + if err != nil { + return nil, err + } + return &scheduler.JobRunStatus{ + ScheduledAt: r.ScheduledTime.UTC(), + State: runState, + }, nil +} + +func (r ReplayRepository) RegisterReplay(ctx context.Context, replay *scheduler.Replay, runs []*scheduler.JobRunStatus) (uuid.UUID, error) { + tx, err := r.db.BeginTx(ctx, pgx.TxOptions{}) + if err != nil { + return uuid.Nil, err + } + defer func() { + if err != nil { + tx.Rollback(ctx) + } else { + tx.Commit(ctx) + } + }() + + if err := r.insertReplay(ctx, tx, replay); err != nil { + return uuid.Nil, err + } + + storedReplay, err := r.getReplayRequest(ctx, tx, replay) + if err != nil { + return uuid.Nil, err + } + + // TODO: consider to store message of each run + if err := r.insertReplayRuns(ctx, tx, storedReplay.ID, runs); err != nil { + return uuid.Nil, err + } + + return storedReplay.ID, nil +} + +func (r ReplayRepository) GetReplayToExecute(ctx context.Context) (*scheduler.ReplayWithRun, error) { + tx, err := r.db.BeginTx(ctx, pgx.TxOptions{}) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + tx.Rollback(ctx) + } else { + tx.Commit(ctx) + } + }() + + replayRuns, err := r.getExecutableReplayRuns(ctx, tx) + if err != nil { + return nil, err + } + if replayRuns == nil { + return nil, errors.NotFound(scheduler.EntityJobRun, "no executable replay request found") + } + + storedReplay, err := 
toReplay(replayRuns) + if err != nil { + return nil, err + } + + // TODO: Avoid having In Progress, but instead use row lock (for update) + if _, err := tx.Exec(ctx, updateReplayRequest, scheduler.ReplayStateInProgress, "", storedReplay.Replay.ID()); err != nil { + return nil, errors.Wrap(scheduler.EntityJobRun, "unable to update replay", err) + } + return storedReplay, nil +} + +func (r ReplayRepository) GetReplayRequestsByStatus(ctx context.Context, statusList []scheduler.ReplayState) ([]*scheduler.Replay, error) { + getReplayRequest := `SELECT ` + replayColumns + ` FROM replay_request WHERE status = ANY($1)` + rows, err := r.db.Query(ctx, getReplayRequest, statusList) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "unable to get replay list", err) + } + defer rows.Close() + + var replayReqs []*scheduler.Replay + for rows.Next() { + var rr replayRequest + if err := rows.Scan(&rr.ID, &rr.JobName, &rr.NamespaceName, &rr.ProjectName, &rr.StartTime, &rr.EndTime, &rr.Description, &rr.Parallel, &rr.JobConfig, + &rr.Status, &rr.Message, &rr.CreatedAt); err != nil { + return nil, errors.Wrap(scheduler.EntityJobRun, "unable to get the stored replay", err) + } + schedulerReplayReq, err := rr.toSchedulerReplayRequest() + if err != nil { + return nil, err + } + replayReqs = append(replayReqs, schedulerReplayReq) + } + return replayReqs, nil +} + +func toReplay(replayRuns []*replayRun) (*scheduler.ReplayWithRun, error) { + var storedReplay *scheduler.ReplayWithRun + for _, run := range replayRuns { + if storedReplay != nil { + runState, err := scheduler.StateFromString(run.RunStatus) + if err != nil { + return nil, err + } + jobRun := &scheduler.JobRunStatus{ + ScheduledAt: run.ScheduledTime.UTC(), + State: runState, + } + storedReplay.Runs = append(storedReplay.Runs, jobRun) + continue + } + + replay, err := run.toReplayRequest() + if err != nil { + return nil, err + } + + jobRun, err := run.toJobRunStatus() + if err != nil { + return nil, err + } + + storedReplay = &scheduler.ReplayWithRun{ + Replay: replay, + Runs: []*scheduler.JobRunStatus{jobRun}, + } + } + return storedReplay, nil +} + +func (r ReplayRepository) UpdateReplayStatus(ctx context.Context, id uuid.UUID, replayStatus scheduler.ReplayState, message string) error { + return r.updateReplayRequest(ctx, id, replayStatus, message) +} + +func (r ReplayRepository) UpdateReplay(ctx context.Context, id uuid.UUID, replayStatus scheduler.ReplayState, runs []*scheduler.JobRunStatus, message string) error { + if err := r.updateReplayRequest(ctx, id, replayStatus, message); err != nil { + return err + } + + return r.updateReplayRuns(ctx, id, runs) +} + +func (r ReplayRepository) GetReplayJobConfig(ctx context.Context, jobTenant tenant.Tenant, jobName scheduler.JobName, scheduledAt time.Time) (map[string]string, error) { + getReplayRequest := `SELECT job_config FROM replay_request WHERE job_name=$1 AND namespace_name=$2 AND project_name=$3 AND start_time<=$4 AND $4<=end_time ORDER BY created_at ASC` + rows, err := r.db.Query(ctx, getReplayRequest, jobName, jobTenant.NamespaceName(), jobTenant.ProjectName(), scheduledAt) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "unable to get replay job configs", err) + } + defer rows.Close() + + configs := map[string]string{} + for rows.Next() { + var rr replayRequest + if err := rows.Scan(&rr.JobConfig); err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errors.NotFound(job.EntityJob, fmt.Sprintf("no replay found for scheduledAt %s", scheduledAt.String())) + } + return 
nil, errors.Wrap(scheduler.EntityJobRun, "unable to get the stored replay job config", err) + } + for k, v := range rr.JobConfig { + configs[k] = v + } + } + return configs, nil +} + +func (r ReplayRepository) updateReplayRequest(ctx context.Context, id uuid.UUID, replayStatus scheduler.ReplayState, message string) error { + if _, err := r.db.Exec(ctx, updateReplayRequest, replayStatus, message, id); err != nil { + return errors.Wrap(scheduler.EntityJobRun, "unable to update replay", err) + } + return nil +} + +func (r ReplayRepository) updateReplayRuns(ctx context.Context, id uuid.UUID, runs []*scheduler.JobRunStatus) error { + tx, err := r.db.BeginTx(ctx, pgx.TxOptions{}) + if err != nil { + return err + } + defer func() { + if err != nil { + tx.Rollback(ctx) + } else { + tx.Commit(ctx) + } + }() + + deleteRuns := `DELETE FROM replay_run WHERE replay_id = $1` + if _, err := tx.Exec(ctx, deleteRuns, id); err != nil { + return errors.Wrap(scheduler.EntityJobRun, "unable to delete runs of replay", err) + } + return r.insertReplayRuns(ctx, tx, id, runs) +} + +func (ReplayRepository) insertReplay(ctx context.Context, tx pgx.Tx, replay *scheduler.Replay) error { + insertReplay := `INSERT INTO replay_request (` + replayColumnsToStore + `, created_at, updated_at) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, NOW(), NOW())` + _, err := tx.Exec(ctx, insertReplay, replay.JobName().String(), replay.Tenant().NamespaceName(), replay.Tenant().ProjectName(), + replay.Config().StartTime, replay.Config().EndTime, replay.Config().Description, replay.Config().Parallel, replay.Config().JobConfig, replay.State(), replay.Message()) + if err != nil { + return errors.Wrap(scheduler.EntityJobRun, "unable to store replay", err) + } + return nil +} + +func (ReplayRepository) getReplayRequest(ctx context.Context, tx pgx.Tx, replay *scheduler.Replay) (replayRequest, error) { + var rr replayRequest + getReplayRequest := `SELECT ` + replayColumns + ` FROM replay_request where project_name = $1 and job_name = $2 and start_time = $3 and end_time = $4 order by created_at desc limit 1` + if err := tx.QueryRow(ctx, getReplayRequest, replay.Tenant().ProjectName(), replay.JobName().String(), replay.Config().StartTime, replay.Config().EndTime).
+ Scan(&rr.ID, &rr.JobName, &rr.NamespaceName, &rr.ProjectName, &rr.StartTime, &rr.EndTime, &rr.Description, &rr.Parallel, &rr.JobConfig, + &rr.Status, &rr.Message, &rr.CreatedAt); err != nil { + return rr, errors.Wrap(scheduler.EntityJobRun, "unable to get the stored replay", err) + } + return rr, nil +} + +func (ReplayRepository) getExecutableReplayRuns(ctx context.Context, tx pgx.Tx) ([]*replayRun, error) { + getReplayRequest := ` + WITH request AS ( + SELECT ` + replayColumns + ` FROM replay_request WHERE status IN ('created', 'partial replayed', 'replayed') + ORDER BY updated_at DESC LIMIT 1 + ) + SELECT ` + replayRunDetailColumns + ` FROM replay_run AS run + JOIN request AS r ON (replay_id = r.id)` + + rows, err := tx.Query(ctx, getReplayRequest) + if err != nil { + return nil, errors.Wrap(job.EntityJob, "unable to get the stored replay", err) + } + defer rows.Close() + + var runs []*replayRun + for rows.Next() { + var run replayRun + if err := rows.Scan(&run.ID, &run.JobName, &run.NamespaceName, &run.ProjectName, &run.StartTime, &run.EndTime, + &run.Description, &run.Parallel, &run.JobConfig, &run.ReplayStatus, &run.Message, &run.ScheduledTime, &run.RunStatus, &run.CreatedAt); err != nil { + return runs, errors.Wrap(scheduler.EntityJobRun, "unable to get the stored replay", err) + } + runs = append(runs, &run) + } + return runs, nil +} + +func (ReplayRepository) insertReplayRuns(ctx context.Context, tx pgx.Tx, replayID uuid.UUID, runs []*scheduler.JobRunStatus) error { + insertReplayRun := `INSERT INTO replay_run (` + replayRunColumns + `, created_at, updated_at) values ($1, $2, $3, NOW(), NOW())` + for _, run := range runs { + _, err := tx.Exec(ctx, insertReplayRun, replayID, run.ScheduledAt, run.State) + if err != nil { + return errors.Wrap(scheduler.EntityJobRun, "unable to store replay", err) + } + } + return nil +} + +func NewReplayRepository(db *pgxpool.Pool) *ReplayRepository { + return &ReplayRepository{db: db} +} diff --git a/internal/store/postgres/scheduler/replay_repository_test.go b/internal/store/postgres/scheduler/replay_repository_test.go new file mode 100644 index 0000000000..d65ccc1fe5 --- /dev/null +++ b/internal/store/postgres/scheduler/replay_repository_test.go @@ -0,0 +1,181 @@ +//go:build !unit_test + +package scheduler_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/odpf/optimus/core/scheduler" + "github.com/odpf/optimus/core/tenant" + postgres "github.com/odpf/optimus/internal/store/postgres/scheduler" +) + +func TestPostgresSchedulerRepository(t *testing.T) { + ctx := context.Background() + tnnt, _ := tenant.NewTenant("test-proj", "test-ns") + endTime := time.Now() + startTime := endTime.Add(-48 * time.Hour) + replayJobConfig := map[string]string{"EXECUTION_PROJECT": "example_project"} + description := "sample backfill" + + jobRunsAllPending := []*scheduler.JobRunStatus{ + { + ScheduledAt: startTime, + State: scheduler.StatePending, + }, + { + ScheduledAt: startTime.Add(24 * time.Hour), + State: scheduler.StatePending, + }, + } + jobRunsAllQueued := []*scheduler.JobRunStatus{ + { + ScheduledAt: startTime, + State: scheduler.StateQueued, + }, + { + ScheduledAt: startTime.Add(24 * time.Hour), + State: scheduler.StateQueued, + }, + } + + t.Run("RegisterReplay", func(t *testing.T) { + t.Run("store replay request and the runs", func(t *testing.T) { + db := dbSetup() + replayRepo := postgres.NewReplayRepository(db) + + replayConfig := scheduler.NewReplayConfig(startTime, endTime, true, replayJobConfig, 
description) + replayReq := scheduler.NewReplayRequest(jobAName, tnnt, replayConfig, scheduler.ReplayStateCreated) + + replayID, err := replayRepo.RegisterReplay(ctx, replayReq, jobRunsAllPending) + assert.Nil(t, err) + assert.NotNil(t, replayID) + }) + }) + + t.Run("UpdateReplay", func(t *testing.T) { + t.Run("updates replay request and reinsert the runs", func(t *testing.T) { + db := dbSetup() + replayRepo := postgres.NewReplayRepository(db) + + replayConfig := scheduler.NewReplayConfig(startTime, endTime, true, replayJobConfig, description) + replayReq := scheduler.NewReplayRequest(jobAName, tnnt, replayConfig, scheduler.ReplayStateCreated) + + replayID, err := replayRepo.RegisterReplay(ctx, replayReq, jobRunsAllPending) + assert.Nil(t, err) + assert.NotNil(t, replayID) + + err = replayRepo.UpdateReplay(ctx, replayID, scheduler.ReplayStateReplayed, jobRunsAllQueued, "") + assert.NoError(t, err) + }) + }) + + t.Run("GetReplayToExecute", func(t *testing.T) { + t.Run("return executable replay", func(t *testing.T) { + db := dbSetup() + replayRepo := postgres.NewReplayRepository(db) + + replayConfig := scheduler.NewReplayConfig(startTime, endTime, true, replayJobConfig, description) + replayReq1 := scheduler.NewReplayRequest(jobAName, tnnt, replayConfig, scheduler.ReplayStateSuccess) + replayReq2 := scheduler.NewReplayRequest(jobBName, tnnt, replayConfig, scheduler.ReplayStateCreated) + + replayID1, err := replayRepo.RegisterReplay(ctx, replayReq1, jobRunsAllPending) + assert.Nil(t, err) + assert.NotNil(t, replayID1) + + replayID2, err := replayRepo.RegisterReplay(ctx, replayReq2, jobRunsAllPending) + assert.Nil(t, err) + assert.NotNil(t, replayID2) + + replayToExecute, err := replayRepo.GetReplayToExecute(ctx) + assert.Nil(t, err) + assert.Equal(t, jobBName, replayToExecute.Replay.JobName().String()) + }) + t.Run("return error not found if no executable replay found", func(t *testing.T) { + db := dbSetup() + replayRepo := postgres.NewReplayRepository(db) + + replayConfig := scheduler.NewReplayConfig(startTime, endTime, true, map[string]string{}, description) + replayReq1 := scheduler.NewReplayRequest(jobAName, tnnt, replayConfig, scheduler.ReplayStateSuccess) + + replayID1, err := replayRepo.RegisterReplay(ctx, replayReq1, jobRunsAllPending) + assert.Nil(t, err) + assert.NotNil(t, replayID1) + + replayToExecute, err := replayRepo.GetReplayToExecute(ctx) + assert.ErrorContains(t, err, "no executable replay request found") + assert.Nil(t, replayToExecute) + }) + }) + + t.Run("GetReplayRequestsByStatus", func(t *testing.T) { + t.Run("return replay requests given list of status", func(t *testing.T) { + db := dbSetup() + replayRepo := postgres.NewReplayRepository(db) + + replayConfig := scheduler.NewReplayConfig(startTime, endTime, true, replayJobConfig, description) + replayReq1 := scheduler.NewReplayRequest(jobAName, tnnt, replayConfig, scheduler.ReplayStateInProgress) + replayReq2 := scheduler.NewReplayRequest(jobBName, tnnt, replayConfig, scheduler.ReplayStateCreated) + replayReq3 := scheduler.NewReplayRequest("sample-job-C", tnnt, replayConfig, scheduler.ReplayStateFailed) + + replayID1, err := replayRepo.RegisterReplay(ctx, replayReq1, jobRunsAllPending) + assert.Nil(t, err) + assert.NotNil(t, replayID1) + + replayID2, err := replayRepo.RegisterReplay(ctx, replayReq2, jobRunsAllPending) + assert.Nil(t, err) + assert.NotNil(t, replayID2) + + replayID3, err := replayRepo.RegisterReplay(ctx, replayReq3, jobRunsAllPending) + assert.Nil(t, err) + assert.NotNil(t, replayID3) + + replayReqs, err 
:= replayRepo.GetReplayRequestsByStatus(ctx, []scheduler.ReplayState{scheduler.ReplayStateCreated, scheduler.ReplayStateInProgress}) + assert.Nil(t, err) + assert.EqualValues(t, []string{jobAName, jobBName}, []string{replayReqs[0].JobName().String(), replayReqs[1].JobName().String()}) + }) + }) + + t.Run("GetReplayJobConfig", func(t *testing.T) { + t.Run("return replay task config when scheduledAt is provided", func(t *testing.T) { + db := dbSetup() + replayRepo := postgres.NewReplayRepository(db) + startTime, _ := time.Parse(scheduler.ISODateFormat, "2022-01-01T15:04:05Z") + endTime, _ := time.Parse(scheduler.ISODateFormat, "2022-01-03T15:04:05Z") + scheduledAt, _ := time.Parse(scheduler.ISODateFormat, "2022-01-02T15:04:05Z") + + replayConfig := scheduler.NewReplayConfig(startTime, endTime, true, map[string]string{"EXECUTION_PROJECT": "example1"}, description) + replayReq := scheduler.NewReplayRequest(jobBName, tnnt, replayConfig, scheduler.ReplayStateCreated) + _, err := replayRepo.RegisterReplay(ctx, replayReq, jobRunsAllPending) + assert.Nil(t, err) + replayConfig = scheduler.NewReplayConfig(startTime, endTime, true, replayJobConfig, description) + replayReq = scheduler.NewReplayRequest(jobBName, tnnt, replayConfig, scheduler.ReplayStateCreated) + _, err = replayRepo.RegisterReplay(ctx, replayReq, jobRunsAllPending) + assert.Nil(t, err) + + actualReplayJobConfig, err := replayRepo.GetReplayJobConfig(ctx, tnnt, jobBName, scheduledAt) + assert.Nil(t, err) + assert.Equal(t, replayJobConfig, actualReplayJobConfig) + }) + t.Run("return empty replay task config when there's no extra config in replay config", func(t *testing.T) { + db := dbSetup() + replayRepo := postgres.NewReplayRepository(db) + startTime, _ := time.Parse(scheduler.ISODateFormat, "2022-01-01T15:04:05Z") + endTime, _ := time.Parse(scheduler.ISODateFormat, "2022-01-03T15:04:05Z") + scheduledAt, _ := time.Parse(scheduler.ISODateFormat, "2022-01-02T15:04:05Z") + + replayConfig := scheduler.NewReplayConfig(startTime, endTime, true, map[string]string{}, description) + replayReq := scheduler.NewReplayRequest(jobBName, tnnt, replayConfig, scheduler.ReplayStateCreated) + _, err := replayRepo.RegisterReplay(ctx, replayReq, jobRunsAllPending) + assert.Nil(t, err) + + actualReplayJobConfig, err := replayRepo.GetReplayJobConfig(ctx, tnnt, jobBName, scheduledAt) + assert.Nil(t, err) + assert.Equal(t, map[string]string{}, actualReplayJobConfig) + }) + }) +} diff --git a/protos/odpf/optimus/core/v1beta1/replay.pb.go b/protos/odpf/optimus/core/v1beta1/replay.pb.go index c9223b9a17..9604a6e3f0 100644 --- a/protos/odpf/optimus/core/v1beta1/replay.pb.go +++ b/protos/odpf/optimus/core/v1beta1/replay.pb.go @@ -28,16 +28,14 @@ type ReplayRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName,proto3" json:"project_name,omitempty"` - JobName string `protobuf:"bytes,2,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` - NamespaceName string `protobuf:"bytes,3,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` - StartDate string `protobuf:"bytes,4,opt,name=start_date,json=startDate,proto3" json:"start_date,omitempty"` - EndDate string `protobuf:"bytes,5,opt,name=end_date,json=endDate,proto3" json:"end_date,omitempty"` - Force bool `protobuf:"varint,6,opt,name=force,proto3" json:"force,omitempty"` - // represents which downstream to be replayed. 
- // possible values are the namespace names, *, or empty. - // '*' means all namespaces are allowed, empty list means all downstream will be ignored. - AllowedDownstreamNamespaces []string `protobuf:"bytes,7,rep,name=allowed_downstream_namespaces,json=allowedDownstreamNamespaces,proto3" json:"allowed_downstream_namespaces,omitempty"` + ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName,proto3" json:"project_name,omitempty"` + JobName string `protobuf:"bytes,2,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` + NamespaceName string `protobuf:"bytes,3,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + Parallel bool `protobuf:"varint,6,opt,name=parallel,proto3" json:"parallel,omitempty"` + Description string `protobuf:"bytes,7,opt,name=description,proto3" json:"description,omitempty"` + JobConfig string `protobuf:"bytes,8,opt,name=job_config,json=jobConfig,proto3" json:"job_config,omitempty"` } func (x *ReplayRequest) Reset() { @@ -93,684 +91,66 @@ func (x *ReplayRequest) GetNamespaceName() string { return "" } -func (x *ReplayRequest) GetStartDate() string { +func (x *ReplayRequest) GetStartTime() *timestamppb.Timestamp { if x != nil { - return x.StartDate - } - return "" -} - -func (x *ReplayRequest) GetEndDate() string { - if x != nil { - return x.EndDate - } - return "" -} - -func (x *ReplayRequest) GetForce() bool { - if x != nil { - return x.Force - } - return false -} - -func (x *ReplayRequest) GetAllowedDownstreamNamespaces() []string { - if x != nil { - return x.AllowedDownstreamNamespaces - } - return nil -} - -type ReplayResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - IgnoredJobs []string `protobuf:"bytes,2,rep,name=ignored_jobs,json=ignoredJobs,proto3" json:"ignored_jobs,omitempty"` -} - -func (x *ReplayResponse) Reset() { - *x = ReplayResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReplayResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReplayResponse) ProtoMessage() {} - -func (x *ReplayResponse) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReplayResponse.ProtoReflect.Descriptor instead. 
-func (*ReplayResponse) Descriptor() ([]byte, []int) { - return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{1} -} - -func (x *ReplayResponse) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *ReplayResponse) GetIgnoredJobs() []string { - if x != nil { - return x.IgnoredJobs - } - return nil -} - -type ReplayDryRunRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName,proto3" json:"project_name,omitempty"` - JobName string `protobuf:"bytes,2,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` - NamespaceName string `protobuf:"bytes,3,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` - StartDate string `protobuf:"bytes,4,opt,name=start_date,json=startDate,proto3" json:"start_date,omitempty"` - EndDate string `protobuf:"bytes,5,opt,name=end_date,json=endDate,proto3" json:"end_date,omitempty"` - // represents which downstream to be replayed. - // possible values are the namespace names, *, or empty. - // '*' means all namespaces are allowed, empty list means all downstream will be ignored. - AllowedDownstreamNamespaces []string `protobuf:"bytes,6,rep,name=allowed_downstream_namespaces,json=allowedDownstreamNamespaces,proto3" json:"allowed_downstream_namespaces,omitempty"` -} - -func (x *ReplayDryRunRequest) Reset() { - *x = ReplayDryRunRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReplayDryRunRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReplayDryRunRequest) ProtoMessage() {} - -func (x *ReplayDryRunRequest) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReplayDryRunRequest.ProtoReflect.Descriptor instead. -func (*ReplayDryRunRequest) Descriptor() ([]byte, []int) { - return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{2} -} - -func (x *ReplayDryRunRequest) GetProjectName() string { - if x != nil { - return x.ProjectName - } - return "" -} - -func (x *ReplayDryRunRequest) GetJobName() string { - if x != nil { - return x.JobName - } - return "" -} - -func (x *ReplayDryRunRequest) GetNamespaceName() string { - if x != nil { - return x.NamespaceName - } - return "" -} - -func (x *ReplayDryRunRequest) GetStartDate() string { - if x != nil { - return x.StartDate - } - return "" -} - -func (x *ReplayDryRunRequest) GetEndDate() string { - if x != nil { - return x.EndDate - } - return "" -} - -func (x *ReplayDryRunRequest) GetAllowedDownstreamNamespaces() []string { - if x != nil { - return x.AllowedDownstreamNamespaces - } - return nil -} - -type ReplayDryRunResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - // Deprecated: Do not use. 
- Response *ReplayExecutionTreeNode `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` - ExecutionTree *ReplayExecutionTreeNode `protobuf:"bytes,3,opt,name=execution_tree,json=executionTree,proto3" json:"execution_tree,omitempty"` - IgnoredJobs []string `protobuf:"bytes,4,rep,name=ignored_jobs,json=ignoredJobs,proto3" json:"ignored_jobs,omitempty"` -} - -func (x *ReplayDryRunResponse) Reset() { - *x = ReplayDryRunResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReplayDryRunResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReplayDryRunResponse) ProtoMessage() {} - -func (x *ReplayDryRunResponse) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReplayDryRunResponse.ProtoReflect.Descriptor instead. -func (*ReplayDryRunResponse) Descriptor() ([]byte, []int) { - return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{3} -} - -func (x *ReplayDryRunResponse) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -// Deprecated: Do not use. -func (x *ReplayDryRunResponse) GetResponse() *ReplayExecutionTreeNode { - if x != nil { - return x.Response - } - return nil -} - -func (x *ReplayDryRunResponse) GetExecutionTree() *ReplayExecutionTreeNode { - if x != nil { - return x.ExecutionTree - } - return nil -} - -func (x *ReplayDryRunResponse) GetIgnoredJobs() []string { - if x != nil { - return x.IgnoredJobs - } - return nil -} - -type ReplayExecutionTreeNode struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - JobName string `protobuf:"bytes,1,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` - Dependents []*ReplayExecutionTreeNode `protobuf:"bytes,2,rep,name=dependents,proto3" json:"dependents,omitempty"` - Runs []*timestamppb.Timestamp `protobuf:"bytes,3,rep,name=runs,proto3" json:"runs,omitempty"` -} - -func (x *ReplayExecutionTreeNode) Reset() { - *x = ReplayExecutionTreeNode{} - if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReplayExecutionTreeNode) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReplayExecutionTreeNode) ProtoMessage() {} - -func (x *ReplayExecutionTreeNode) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReplayExecutionTreeNode.ProtoReflect.Descriptor instead. 
-func (*ReplayExecutionTreeNode) Descriptor() ([]byte, []int) { - return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{4} -} - -func (x *ReplayExecutionTreeNode) GetJobName() string { - if x != nil { - return x.JobName - } - return "" -} - -func (x *ReplayExecutionTreeNode) GetDependents() []*ReplayExecutionTreeNode { - if x != nil { - return x.Dependents - } - return nil -} - -func (x *ReplayExecutionTreeNode) GetRuns() []*timestamppb.Timestamp { - if x != nil { - return x.Runs - } - return nil -} - -type GetReplayStatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - State string `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` - Response *ReplayStatusTreeNode `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` -} - -func (x *GetReplayStatusResponse) Reset() { - *x = GetReplayStatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetReplayStatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetReplayStatusResponse) ProtoMessage() {} - -func (x *GetReplayStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetReplayStatusResponse.ProtoReflect.Descriptor instead. -func (*GetReplayStatusResponse) Descriptor() ([]byte, []int) { - return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{5} -} - -func (x *GetReplayStatusResponse) GetState() string { - if x != nil { - return x.State - } - return "" -} - -func (x *GetReplayStatusResponse) GetResponse() *ReplayStatusTreeNode { - if x != nil { - return x.Response - } - return nil -} - -type ReplayStatusTreeNode struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - JobName string `protobuf:"bytes,1,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` - Dependents []*ReplayStatusTreeNode `protobuf:"bytes,2,rep,name=dependents,proto3" json:"dependents,omitempty"` - Runs []*ReplayStatusRun `protobuf:"bytes,3,rep,name=runs,proto3" json:"runs,omitempty"` - State string `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"` -} - -func (x *ReplayStatusTreeNode) Reset() { - *x = ReplayStatusTreeNode{} - if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReplayStatusTreeNode) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReplayStatusTreeNode) ProtoMessage() {} - -func (x *ReplayStatusTreeNode) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReplayStatusTreeNode.ProtoReflect.Descriptor instead. 
-func (*ReplayStatusTreeNode) Descriptor() ([]byte, []int) { - return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{6} -} - -func (x *ReplayStatusTreeNode) GetJobName() string { - if x != nil { - return x.JobName - } - return "" -} - -func (x *ReplayStatusTreeNode) GetDependents() []*ReplayStatusTreeNode { - if x != nil { - return x.Dependents - } - return nil -} - -func (x *ReplayStatusTreeNode) GetRuns() []*ReplayStatusRun { - if x != nil { - return x.Runs - } - return nil -} - -func (x *ReplayStatusTreeNode) GetState() string { - if x != nil { - return x.State - } - return "" -} - -type ReplayStatusRun struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Run *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=run,proto3" json:"run,omitempty"` - State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` -} - -func (x *ReplayStatusRun) Reset() { - *x = ReplayStatusRun{} - if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReplayStatusRun) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReplayStatusRun) ProtoMessage() {} - -func (x *ReplayStatusRun) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReplayStatusRun.ProtoReflect.Descriptor instead. -func (*ReplayStatusRun) Descriptor() ([]byte, []int) { - return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{7} -} - -func (x *ReplayStatusRun) GetRun() *timestamppb.Timestamp { - if x != nil { - return x.Run + return x.StartTime } return nil } -func (x *ReplayStatusRun) GetState() string { - if x != nil { - return x.State - } - return "" -} - -type GetReplayStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - JobName string `protobuf:"bytes,2,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` - ProjectName string `protobuf:"bytes,3,opt,name=project_name,json=projectName,proto3" json:"project_name,omitempty"` -} - -func (x *GetReplayStatusRequest) Reset() { - *x = GetReplayStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetReplayStatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetReplayStatusRequest) ProtoMessage() {} - -func (x *GetReplayStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetReplayStatusRequest.ProtoReflect.Descriptor instead. 
-func (*GetReplayStatusRequest) Descriptor() ([]byte, []int) { - return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{8} -} - -func (x *GetReplayStatusRequest) GetId() string { +func (x *ReplayRequest) GetEndTime() *timestamppb.Timestamp { if x != nil { - return x.Id + return x.EndTime } - return "" + return nil } -func (x *GetReplayStatusRequest) GetJobName() string { +func (x *ReplayRequest) GetParallel() bool { if x != nil { - return x.JobName + return x.Parallel } - return "" + return false } -func (x *GetReplayStatusRequest) GetProjectName() string { +func (x *ReplayRequest) GetDescription() string { if x != nil { - return x.ProjectName + return x.Description } return "" } -type ListReplaysRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ProjectName string `protobuf:"bytes,1,opt,name=project_name,json=projectName,proto3" json:"project_name,omitempty"` -} - -func (x *ListReplaysRequest) Reset() { - *x = ListReplaysRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListReplaysRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListReplaysRequest) ProtoMessage() {} - -func (x *ListReplaysRequest) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListReplaysRequest.ProtoReflect.Descriptor instead. -func (*ListReplaysRequest) Descriptor() ([]byte, []int) { - return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{9} -} - -func (x *ListReplaysRequest) GetProjectName() string { +func (x *ReplayRequest) GetJobConfig() string { if x != nil { - return x.ProjectName + return x.JobConfig } return "" } -type ListReplaysResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ReplayList []*ReplaySpec `protobuf:"bytes,1,rep,name=replay_list,json=replayList,proto3" json:"replay_list,omitempty"` -} - -func (x *ListReplaysResponse) Reset() { - *x = ListReplaysResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListReplaysResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListReplaysResponse) ProtoMessage() {} - -func (x *ListReplaysResponse) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListReplaysResponse.ProtoReflect.Descriptor instead. 
-func (*ListReplaysResponse) Descriptor() ([]byte, []int) { - return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{10} -} - -func (x *ListReplaysResponse) GetReplayList() []*ReplaySpec { - if x != nil { - return x.ReplayList - } - return nil -} - -type ReplaySpec struct { +type ReplayResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - JobName string `protobuf:"bytes,2,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` - StartDate *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_date,json=startDate,proto3" json:"start_date,omitempty"` - EndDate *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_date,json=endDate,proto3" json:"end_date,omitempty"` - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` - CreatedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - Config map[string]string `protobuf:"bytes,7,rep,name=config,proto3" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (x *ReplaySpec) Reset() { - *x = ReplaySpec{} +func (x *ReplayResponse) Reset() { + *x = ReplayResponse{} if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[11] + mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReplaySpec) String() string { +func (x *ReplayResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReplaySpec) ProtoMessage() {} +func (*ReplayResponse) ProtoMessage() {} -func (x *ReplaySpec) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[11] +func (x *ReplayResponse) ProtoReflect() protoreflect.Message { + mi := &file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -781,60 +161,18 @@ func (x *ReplaySpec) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReplaySpec.ProtoReflect.Descriptor instead. -func (*ReplaySpec) Descriptor() ([]byte, []int) { - return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{11} +// Deprecated: Use ReplayResponse.ProtoReflect.Descriptor instead. 
+func (*ReplayResponse) Descriptor() ([]byte, []int) { + return file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP(), []int{1} } -func (x *ReplaySpec) GetId() string { +func (x *ReplayResponse) GetId() string { if x != nil { return x.Id } return "" } -func (x *ReplaySpec) GetJobName() string { - if x != nil { - return x.JobName - } - return "" -} - -func (x *ReplaySpec) GetStartDate() *timestamppb.Timestamp { - if x != nil { - return x.StartDate - } - return nil -} - -func (x *ReplaySpec) GetEndDate() *timestamppb.Timestamp { - if x != nil { - return x.EndDate - } - return nil -} - -func (x *ReplaySpec) GetState() string { - if x != nil { - return x.State - } - return "" -} - -func (x *ReplaySpec) GetCreatedAt() *timestamppb.Timestamp { - if x != nil { - return x.CreatedAt - } - return nil -} - -func (x *ReplaySpec) GetConfig() map[string]string { - if x != nil { - return x.Config - } - return nil -} - var File_odpf_optimus_core_v1beta1_replay_proto protoreflect.FileDescriptor var file_odpf_optimus_core_v1beta1_replay_proto_rawDesc = []byte{ @@ -849,190 +187,49 @@ var file_odpf_optimus_core_v1beta1_replay_proto_rawDesc = []byte{ 0x74, 0x6f, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x88, 0x02, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x71, + 0x74, 0x6f, 0x22, 0xc3, 0x02, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x44, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, - 0x64, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x44, - 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x43, 0x0a, - 0x0e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x6a, 0x6f, 0x62, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 
0x72, 0x65, 0x64, 0x4a, 0x6f, - 0x62, 0x73, 0x22, 0xf8, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, - 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, - 0x08, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x44, 0x61, 0x74, 0x65, 0x12, 0x19, - 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x65, 0x6e, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x82, 0x02, - 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x12, 0x52, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, - 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, - 0x65, 0x70, 0x6c, 0x61, 0x79, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, - 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6f, - 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, - 0x52, 0x0d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x65, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x6a, 0x6f, 0x62, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x4a, 0x6f, - 0x62, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x17, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x19, - 0x0a, 0x08, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x6a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x52, 0x0a, 0x0a, 0x64, 0x65, 
0x70, - 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, - 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2e, 0x0a, - 0x04, 0x72, 0x75, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x72, 0x75, 0x6e, 0x73, 0x22, 0x7c, 0x0a, - 0x17, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x4b, - 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, - 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd8, 0x01, 0x0a, 0x14, - 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x72, 0x65, 0x65, - 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x4f, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, - 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x72, 0x65, 0x65, - 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x3e, 0x0a, 0x04, 0x72, 0x75, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, - 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, - 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x75, 0x6e, 0x52, 0x04, 0x72, 0x75, 0x6e, 0x73, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x55, 0x0a, 0x0f, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x75, 0x6e, 0x12, 0x2c, 0x0a, 0x03, 0x72, 0x75, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x54, 0x69, 0x6d, 
0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x03, 0x72, 0x75, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x66, 0x0a, - 0x16, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x6f, 0x62, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x61, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x5d, - 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x5f, - 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x64, 0x70, - 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x70, 0x65, - 0x63, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x80, 0x03, - 0x0a, 0x0a, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x70, 0x65, 0x63, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, - 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x6a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x5f, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x44, 0x61, - 0x74, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x07, 0x65, 0x6e, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 
0x64, 0x41, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6f, 0x64, 0x70, - 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x70, 0x65, - 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x32, 0x9d, 0x05, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0xa9, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, - 0x52, 0x75, 0x6e, 0x12, 0x2e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, - 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, - 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x22, 0x2d, 0x2f, 0x76, - 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, - 0x70, 0x6c, 0x61, 0x79, 0x2f, 0x64, 0x72, 0x79, 0x72, 0x75, 0x6e, 0x3a, 0x01, 0x2a, 0x12, 0x90, - 0x01, 0x0a, 0x06, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x12, 0x28, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, - 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, - 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x22, 0x26, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, - 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x3a, 0x01, - 0x2a, 0x12, 0xad, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x31, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, - 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, - 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, - 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 
0x6c, 0x61, 0x79, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x33, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x70, + 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, + 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6a, + 0x6f, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x20, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x6c, + 0x61, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x32, 0xa2, 0x01, 0x0a, 0x0d, 0x52, + 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x90, 0x01, 0x0a, + 0x06, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x12, 0x28, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, + 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2b, 0x22, 0x26, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x2f, 0x7b, 0x69, 0x64, - 0x7d, 0x12, 0x9c, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, - 0x73, 0x12, 0x2d, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x2e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, - 0x61, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, - 0x42, 0x8d, 0x01, 0x0a, 0x16, 0x69, 0x6f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x6e, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x42, 0x14, 0x52, 0x65, 0x70, - 0x6c, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x50, 0x01, 0x5a, 0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x6f, 0x64, 0x70, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x74, 
0x69, - 0x6d, 0x75, 0x73, 0x92, 0x41, 0x3a, 0x12, 0x05, 0x32, 0x03, 0x30, 0x2e, 0x31, 0x1a, 0x0e, 0x31, - 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x3a, 0x39, 0x31, 0x30, 0x30, 0x22, 0x04, 0x2f, - 0x61, 0x70, 0x69, 0x2a, 0x01, 0x01, 0x72, 0x18, 0x0a, 0x16, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x75, - 0x73, 0x20, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x20, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x3a, 0x01, 0x2a, 0x42, + 0x8d, 0x01, 0x0a, 0x16, 0x69, 0x6f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x6e, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x42, 0x14, 0x52, 0x65, 0x70, 0x6c, + 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x50, 0x01, 0x5a, 0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, + 0x64, 0x70, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x75, 0x73, 0x92, 0x41, 0x3a, 0x12, 0x05, 0x32, 0x03, 0x30, 0x2e, 0x31, 0x1a, 0x0e, 0x31, 0x32, + 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x3a, 0x39, 0x31, 0x30, 0x30, 0x22, 0x04, 0x2f, 0x61, + 0x70, 0x69, 0x2a, 0x01, 0x01, 0x72, 0x18, 0x0a, 0x16, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x20, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x20, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1047,50 +244,22 @@ func file_odpf_optimus_core_v1beta1_replay_proto_rawDescGZIP() []byte { return file_odpf_optimus_core_v1beta1_replay_proto_rawDescData } -var file_odpf_optimus_core_v1beta1_replay_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_odpf_optimus_core_v1beta1_replay_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_odpf_optimus_core_v1beta1_replay_proto_goTypes = []interface{}{ - (*ReplayRequest)(nil), // 0: odpf.optimus.core.v1beta1.ReplayRequest - (*ReplayResponse)(nil), // 1: odpf.optimus.core.v1beta1.ReplayResponse - (*ReplayDryRunRequest)(nil), // 2: odpf.optimus.core.v1beta1.ReplayDryRunRequest - (*ReplayDryRunResponse)(nil), // 3: odpf.optimus.core.v1beta1.ReplayDryRunResponse - (*ReplayExecutionTreeNode)(nil), // 4: odpf.optimus.core.v1beta1.ReplayExecutionTreeNode - (*GetReplayStatusResponse)(nil), // 5: odpf.optimus.core.v1beta1.GetReplayStatusResponse - (*ReplayStatusTreeNode)(nil), // 6: odpf.optimus.core.v1beta1.ReplayStatusTreeNode - (*ReplayStatusRun)(nil), // 7: odpf.optimus.core.v1beta1.ReplayStatusRun - (*GetReplayStatusRequest)(nil), // 8: odpf.optimus.core.v1beta1.GetReplayStatusRequest - (*ListReplaysRequest)(nil), // 9: odpf.optimus.core.v1beta1.ListReplaysRequest - (*ListReplaysResponse)(nil), // 10: odpf.optimus.core.v1beta1.ListReplaysResponse - (*ReplaySpec)(nil), // 11: odpf.optimus.core.v1beta1.ReplaySpec - nil, // 12: odpf.optimus.core.v1beta1.ReplaySpec.ConfigEntry - (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp + (*ReplayRequest)(nil), // 0: odpf.optimus.core.v1beta1.ReplayRequest + (*ReplayResponse)(nil), // 1: odpf.optimus.core.v1beta1.ReplayResponse + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp } var file_odpf_optimus_core_v1beta1_replay_proto_depIdxs = []int32{ - 4, // 0: odpf.optimus.core.v1beta1.ReplayDryRunResponse.response:type_name -> odpf.optimus.core.v1beta1.ReplayExecutionTreeNode - 4, // 1: odpf.optimus.core.v1beta1.ReplayDryRunResponse.execution_tree:type_name -> odpf.optimus.core.v1beta1.ReplayExecutionTreeNode - 4, // 
2: odpf.optimus.core.v1beta1.ReplayExecutionTreeNode.dependents:type_name -> odpf.optimus.core.v1beta1.ReplayExecutionTreeNode - 13, // 3: odpf.optimus.core.v1beta1.ReplayExecutionTreeNode.runs:type_name -> google.protobuf.Timestamp - 6, // 4: odpf.optimus.core.v1beta1.GetReplayStatusResponse.response:type_name -> odpf.optimus.core.v1beta1.ReplayStatusTreeNode - 6, // 5: odpf.optimus.core.v1beta1.ReplayStatusTreeNode.dependents:type_name -> odpf.optimus.core.v1beta1.ReplayStatusTreeNode - 7, // 6: odpf.optimus.core.v1beta1.ReplayStatusTreeNode.runs:type_name -> odpf.optimus.core.v1beta1.ReplayStatusRun - 13, // 7: odpf.optimus.core.v1beta1.ReplayStatusRun.run:type_name -> google.protobuf.Timestamp - 11, // 8: odpf.optimus.core.v1beta1.ListReplaysResponse.replay_list:type_name -> odpf.optimus.core.v1beta1.ReplaySpec - 13, // 9: odpf.optimus.core.v1beta1.ReplaySpec.start_date:type_name -> google.protobuf.Timestamp - 13, // 10: odpf.optimus.core.v1beta1.ReplaySpec.end_date:type_name -> google.protobuf.Timestamp - 13, // 11: odpf.optimus.core.v1beta1.ReplaySpec.created_at:type_name -> google.protobuf.Timestamp - 12, // 12: odpf.optimus.core.v1beta1.ReplaySpec.config:type_name -> odpf.optimus.core.v1beta1.ReplaySpec.ConfigEntry - 2, // 13: odpf.optimus.core.v1beta1.ReplayService.ReplayDryRun:input_type -> odpf.optimus.core.v1beta1.ReplayDryRunRequest - 0, // 14: odpf.optimus.core.v1beta1.ReplayService.Replay:input_type -> odpf.optimus.core.v1beta1.ReplayRequest - 8, // 15: odpf.optimus.core.v1beta1.ReplayService.GetReplayStatus:input_type -> odpf.optimus.core.v1beta1.GetReplayStatusRequest - 9, // 16: odpf.optimus.core.v1beta1.ReplayService.ListReplays:input_type -> odpf.optimus.core.v1beta1.ListReplaysRequest - 3, // 17: odpf.optimus.core.v1beta1.ReplayService.ReplayDryRun:output_type -> odpf.optimus.core.v1beta1.ReplayDryRunResponse - 1, // 18: odpf.optimus.core.v1beta1.ReplayService.Replay:output_type -> odpf.optimus.core.v1beta1.ReplayResponse - 5, // 19: odpf.optimus.core.v1beta1.ReplayService.GetReplayStatus:output_type -> odpf.optimus.core.v1beta1.GetReplayStatusResponse - 10, // 20: odpf.optimus.core.v1beta1.ReplayService.ListReplays:output_type -> odpf.optimus.core.v1beta1.ListReplaysResponse - 17, // [17:21] is the sub-list for method output_type - 13, // [13:17] is the sub-list for method input_type - 13, // [13:13] is the sub-list for extension type_name - 13, // [13:13] is the sub-list for extension extendee - 0, // [0:13] is the sub-list for field type_name + 2, // 0: odpf.optimus.core.v1beta1.ReplayRequest.start_time:type_name -> google.protobuf.Timestamp + 2, // 1: odpf.optimus.core.v1beta1.ReplayRequest.end_time:type_name -> google.protobuf.Timestamp + 0, // 2: odpf.optimus.core.v1beta1.ReplayService.Replay:input_type -> odpf.optimus.core.v1beta1.ReplayRequest + 1, // 3: odpf.optimus.core.v1beta1.ReplayService.Replay:output_type -> odpf.optimus.core.v1beta1.ReplayResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_odpf_optimus_core_v1beta1_replay_proto_init() } @@ -1123,126 +292,6 @@ func file_odpf_optimus_core_v1beta1_replay_proto_init() { return nil } } - file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplayDryRunRequest); i { - case 0: - return &v.state - case 
1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplayDryRunResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplayExecutionTreeNode); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetReplayStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplayStatusTreeNode); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplayStatusRun); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetReplayStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListReplaysRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListReplaysResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_odpf_optimus_core_v1beta1_replay_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplaySpec); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } } type x struct{} out := protoimpl.TypeBuilder{ @@ -1250,7 +299,7 @@ func file_odpf_optimus_core_v1beta1_replay_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_odpf_optimus_core_v1beta1_replay_proto_rawDesc, NumEnums: 0, - NumMessages: 13, + NumMessages: 2, NumExtensions: 0, NumServices: 1, }, diff --git a/protos/odpf/optimus/core/v1beta1/replay.pb.gw.go b/protos/odpf/optimus/core/v1beta1/replay.pb.gw.go index f46fd5f85e..9d75015115 100644 --- a/protos/odpf/optimus/core/v1beta1/replay.pb.gw.go +++ b/protos/odpf/optimus/core/v1beta1/replay.pb.gw.go @@ -31,74 +31,6 @@ var _ = runtime.String var _ = utilities.NewDoubleArray var _ = metadata.Join -func request_ReplayService_ReplayDryRun_0(ctx context.Context, marshaler runtime.Marshaler, client ReplayServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq 
ReplayDryRunRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["project_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") - } - - protoReq.ProjectName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "project_name", err) - } - - msg, err := client.ReplayDryRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_ReplayService_ReplayDryRun_0(ctx context.Context, marshaler runtime.Marshaler, server ReplayServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ReplayDryRunRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["project_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") - } - - protoReq.ProjectName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "project_name", err) - } - - msg, err := server.ReplayDryRun(ctx, &protoReq) - return msg, metadata, err - -} - func request_ReplayService_Replay_0(ctx context.Context, marshaler runtime.Marshaler, client ReplayServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ReplayRequest var metadata runtime.ServerMetadata @@ -167,177 +99,12 @@ func local_request_ReplayService_Replay_0(ctx context.Context, marshaler runtime } -var ( - filter_ReplayService_GetReplayStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{"project_name": 0, "id": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} -) - -func request_ReplayService_GetReplayStatus_0(ctx context.Context, marshaler runtime.Marshaler, client ReplayServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetReplayStatusRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["project_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") - } - - protoReq.ProjectName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "project_name", err) - } - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - if err != 
nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ReplayService_GetReplayStatus_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetReplayStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_ReplayService_GetReplayStatus_0(ctx context.Context, marshaler runtime.Marshaler, server ReplayServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetReplayStatusRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["project_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") - } - - protoReq.ProjectName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "project_name", err) - } - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ReplayService_GetReplayStatus_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetReplayStatus(ctx, &protoReq) - return msg, metadata, err - -} - -func request_ReplayService_ListReplays_0(ctx context.Context, marshaler runtime.Marshaler, client ReplayServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListReplaysRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["project_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") - } - - protoReq.ProjectName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "project_name", err) - } - - msg, err := client.ListReplays(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_ReplayService_ListReplays_0(ctx context.Context, marshaler runtime.Marshaler, server ReplayServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListReplaysRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["project_name"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") - } - - protoReq.ProjectName, err = runtime.String(val) - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type 
mismatch, parameter: %s, error: %v", "project_name", err) - } - - msg, err := server.ListReplays(ctx, &protoReq) - return msg, metadata, err - -} - // RegisterReplayServiceHandlerServer registers the http handlers for service ReplayService to "mux". // UnaryRPC :call ReplayServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterReplayServiceHandlerFromEndpoint instead. func RegisterReplayServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ReplayServiceServer) error { - mux.Handle("POST", pattern_ReplayService_ReplayDryRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/odpf.optimus.core.v1beta1.ReplayService/ReplayDryRun", runtime.WithHTTPPathPattern("/v1beta1/project/{project_name}/replay/dryrun")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_ReplayService_ReplayDryRun_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_ReplayService_ReplayDryRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - mux.Handle("POST", pattern_ReplayService_Replay_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -361,52 +128,6 @@ func RegisterReplayServiceHandlerServer(ctx context.Context, mux *runtime.ServeM }) - mux.Handle("GET", pattern_ReplayService_GetReplayStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/odpf.optimus.core.v1beta1.ReplayService/GetReplayStatus", runtime.WithHTTPPathPattern("/v1beta1/project/{project_name}/replay/{id}")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_ReplayService_GetReplayStatus_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_ReplayService_GetReplayStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_ReplayService_ListReplays_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/odpf.optimus.core.v1beta1.ReplayService/ListReplays", runtime.WithHTTPPathPattern("/v1beta1/project/{project_name}/replay")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_ReplayService_ListReplays_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_ReplayService_ListReplays_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - return nil } @@ -448,26 +169,6 @@ func RegisterReplayServiceHandler(ctx context.Context, mux *runtime.ServeMux, co // "ReplayServiceClient" to call the correct interceptors. func RegisterReplayServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ReplayServiceClient) error { - mux.Handle("POST", pattern_ReplayService_ReplayDryRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req, "/odpf.optimus.core.v1beta1.ReplayService/ReplayDryRun", runtime.WithHTTPPathPattern("/v1beta1/project/{project_name}/replay/dryrun")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_ReplayService_ReplayDryRun_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_ReplayService_ReplayDryRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - mux.Handle("POST", pattern_ReplayService_Replay_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -488,65 +189,13 @@ func RegisterReplayServiceHandlerClient(ctx context.Context, mux *runtime.ServeM }) - mux.Handle("GET", pattern_ReplayService_GetReplayStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req, "/odpf.optimus.core.v1beta1.ReplayService/GetReplayStatus", runtime.WithHTTPPathPattern("/v1beta1/project/{project_name}/replay/{id}")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_ReplayService_GetReplayStatus_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_ReplayService_GetReplayStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_ReplayService_ListReplays_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req, "/odpf.optimus.core.v1beta1.ReplayService/ListReplays", runtime.WithHTTPPathPattern("/v1beta1/project/{project_name}/replay")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_ReplayService_ListReplays_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_ReplayService_ListReplays_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - return nil } var ( - pattern_ReplayService_ReplayDryRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 2, 4}, []string{"v1beta1", "project", "project_name", "replay", "dryrun"}, "")) - pattern_ReplayService_Replay_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1beta1", "project", "project_name", "replay"}, "")) - - pattern_ReplayService_GetReplayStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"v1beta1", "project", "project_name", "replay", "id"}, "")) - - pattern_ReplayService_ListReplays_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1beta1", "project", "project_name", "replay"}, "")) ) var ( - forward_ReplayService_ReplayDryRun_0 = runtime.ForwardResponseMessage - forward_ReplayService_Replay_0 = runtime.ForwardResponseMessage - - forward_ReplayService_GetReplayStatus_0 = runtime.ForwardResponseMessage - - forward_ReplayService_ListReplays_0 = runtime.ForwardResponseMessage ) diff --git a/protos/odpf/optimus/core/v1beta1/replay.swagger.json b/protos/odpf/optimus/core/v1beta1/replay.swagger.json index b92ed0d8f1..51651331e6 100644 --- a/protos/odpf/optimus/core/v1beta1/replay.swagger.json +++ b/protos/odpf/optimus/core/v1beta1/replay.swagger.json @@ -22,34 +22,6 @@ ], "paths": { "/v1beta1/project/{projectName}/replay": { - "get": { - "operationId": "ReplayService_ListReplays", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/v1beta1ListReplaysResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/rpcStatus" - } - } - }, - "parameters": [ - { - "name": "projectName", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "ReplayService" - ] - }, "post": { "operationId": "ReplayService_Replay", "responses": { @@ -86,80 +58,22 @@ "namespaceName": { "type": "string" }, - "startDate": { - "type": "string" + "startTime": { + "type": "string", + "format": "date-time" }, - "endDate": { - "type": "string" + "endTime": { + "type": "string", + "format": "date-time" }, - "force": { + "parallel": { "type": "boolean" }, - "allowedDownstreamNamespaces": { - "type": "array", - "items": { - "type": "string" - }, - "description": "represents which downstream to be replayed.\npossible values are the namespace names, *, or empty.\n'*' means all namespaces are allowed, empty list means all downstream will be ignored." 
- } - } - } - } - ], - "tags": [ - "ReplayService" - ] - } - }, - "/v1beta1/project/{projectName}/replay/dryrun": { - "post": { - "operationId": "ReplayService_ReplayDryRun", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/v1beta1ReplayDryRunResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/rpcStatus" - } - } - }, - "parameters": [ - { - "name": "projectName", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "type": "object", - "properties": { - "jobName": { - "type": "string" - }, - "namespaceName": { - "type": "string" - }, - "startDate": { + "description": { "type": "string" }, - "endDate": { + "jobConfig": { "type": "string" - }, - "allowedDownstreamNamespaces": { - "type": "array", - "items": { - "type": "string" - }, - "description": "represents which downstream to be replayed.\npossible values are the namespace names, *, or empty.\n'*' means all namespaces are allowed, empty list means all downstream will be ignored." } } } @@ -169,48 +83,6 @@ "ReplayService" ] } - }, - "/v1beta1/project/{projectName}/replay/{id}": { - "get": { - "operationId": "ReplayService_GetReplayStatus", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/v1beta1GetReplayStatusResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/rpcStatus" - } - } - }, - "parameters": [ - { - "name": "projectName", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "jobName", - "in": "query", - "required": false, - "type": "string" - } - ], - "tags": [ - "ReplayService" - ] - } } }, "definitions": { @@ -244,147 +116,11 @@ } } }, - "v1beta1GetReplayStatusResponse": { - "type": "object", - "properties": { - "state": { - "type": "string" - }, - "response": { - "$ref": "#/definitions/v1beta1ReplayStatusTreeNode" - } - } - }, - "v1beta1ListReplaysResponse": { - "type": "object", - "properties": { - "replayList": { - "type": "array", - "items": { - "$ref": "#/definitions/v1beta1ReplaySpec" - } - } - } - }, - "v1beta1ReplayDryRunResponse": { - "type": "object", - "properties": { - "success": { - "type": "boolean" - }, - "response": { - "$ref": "#/definitions/v1beta1ReplayExecutionTreeNode" - }, - "executionTree": { - "$ref": "#/definitions/v1beta1ReplayExecutionTreeNode" - }, - "ignoredJobs": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "v1beta1ReplayExecutionTreeNode": { - "type": "object", - "properties": { - "jobName": { - "type": "string" - }, - "dependents": { - "type": "array", - "items": { - "$ref": "#/definitions/v1beta1ReplayExecutionTreeNode" - } - }, - "runs": { - "type": "array", - "items": { - "type": "string", - "format": "date-time" - } - } - } - }, "v1beta1ReplayResponse": { "type": "object", "properties": { "id": { "type": "string" - }, - "ignoredJobs": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "v1beta1ReplaySpec": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "jobName": { - "type": "string" - }, - "startDate": { - "type": "string", - "format": "date-time" - }, - "endDate": { - "type": "string", - "format": "date-time" - }, - "state": { - "type": "string" - }, - "createdAt": { - "type": 
"string", - "format": "date-time" - }, - "config": { - "type": "object", - "additionalProperties": { - "type": "string" - } - } - } - }, - "v1beta1ReplayStatusRun": { - "type": "object", - "properties": { - "run": { - "type": "string", - "format": "date-time" - }, - "state": { - "type": "string" - } - } - }, - "v1beta1ReplayStatusTreeNode": { - "type": "object", - "properties": { - "jobName": { - "type": "string" - }, - "dependents": { - "type": "array", - "items": { - "$ref": "#/definitions/v1beta1ReplayStatusTreeNode" - } - }, - "runs": { - "type": "array", - "items": { - "$ref": "#/definitions/v1beta1ReplayStatusRun" - } - }, - "state": { - "type": "string" } } } diff --git a/protos/odpf/optimus/core/v1beta1/replay_grpc.pb.go b/protos/odpf/optimus/core/v1beta1/replay_grpc.pb.go index e2d51cb1cb..cc9b7a2cbe 100644 --- a/protos/odpf/optimus/core/v1beta1/replay_grpc.pb.go +++ b/protos/odpf/optimus/core/v1beta1/replay_grpc.pb.go @@ -22,10 +22,7 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type ReplayServiceClient interface { - ReplayDryRun(ctx context.Context, in *ReplayDryRunRequest, opts ...grpc.CallOption) (*ReplayDryRunResponse, error) Replay(ctx context.Context, in *ReplayRequest, opts ...grpc.CallOption) (*ReplayResponse, error) - GetReplayStatus(ctx context.Context, in *GetReplayStatusRequest, opts ...grpc.CallOption) (*GetReplayStatusResponse, error) - ListReplays(ctx context.Context, in *ListReplaysRequest, opts ...grpc.CallOption) (*ListReplaysResponse, error) } type replayServiceClient struct { @@ -36,15 +33,6 @@ func NewReplayServiceClient(cc grpc.ClientConnInterface) ReplayServiceClient { return &replayServiceClient{cc} } -func (c *replayServiceClient) ReplayDryRun(ctx context.Context, in *ReplayDryRunRequest, opts ...grpc.CallOption) (*ReplayDryRunResponse, error) { - out := new(ReplayDryRunResponse) - err := c.cc.Invoke(ctx, "/odpf.optimus.core.v1beta1.ReplayService/ReplayDryRun", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *replayServiceClient) Replay(ctx context.Context, in *ReplayRequest, opts ...grpc.CallOption) (*ReplayResponse, error) { out := new(ReplayResponse) err := c.cc.Invoke(ctx, "/odpf.optimus.core.v1beta1.ReplayService/Replay", in, out, opts...) @@ -54,32 +42,11 @@ func (c *replayServiceClient) Replay(ctx context.Context, in *ReplayRequest, opt return out, nil } -func (c *replayServiceClient) GetReplayStatus(ctx context.Context, in *GetReplayStatusRequest, opts ...grpc.CallOption) (*GetReplayStatusResponse, error) { - out := new(GetReplayStatusResponse) - err := c.cc.Invoke(ctx, "/odpf.optimus.core.v1beta1.ReplayService/GetReplayStatus", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *replayServiceClient) ListReplays(ctx context.Context, in *ListReplaysRequest, opts ...grpc.CallOption) (*ListReplaysResponse, error) { - out := new(ListReplaysResponse) - err := c.cc.Invoke(ctx, "/odpf.optimus.core.v1beta1.ReplayService/ListReplays", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - // ReplayServiceServer is the server API for ReplayService service. 
// All implementations must embed UnimplementedReplayServiceServer // for forward compatibility type ReplayServiceServer interface { - ReplayDryRun(context.Context, *ReplayDryRunRequest) (*ReplayDryRunResponse, error) Replay(context.Context, *ReplayRequest) (*ReplayResponse, error) - GetReplayStatus(context.Context, *GetReplayStatusRequest) (*GetReplayStatusResponse, error) - ListReplays(context.Context, *ListReplaysRequest) (*ListReplaysResponse, error) mustEmbedUnimplementedReplayServiceServer() } @@ -87,18 +54,9 @@ type ReplayServiceServer interface { type UnimplementedReplayServiceServer struct { } -func (UnimplementedReplayServiceServer) ReplayDryRun(context.Context, *ReplayDryRunRequest) (*ReplayDryRunResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReplayDryRun not implemented") -} func (UnimplementedReplayServiceServer) Replay(context.Context, *ReplayRequest) (*ReplayResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Replay not implemented") } -func (UnimplementedReplayServiceServer) GetReplayStatus(context.Context, *GetReplayStatusRequest) (*GetReplayStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetReplayStatus not implemented") -} -func (UnimplementedReplayServiceServer) ListReplays(context.Context, *ListReplaysRequest) (*ListReplaysResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListReplays not implemented") -} func (UnimplementedReplayServiceServer) mustEmbedUnimplementedReplayServiceServer() {} // UnsafeReplayServiceServer may be embedded to opt out of forward compatibility for this service. @@ -112,24 +70,6 @@ func RegisterReplayServiceServer(s grpc.ServiceRegistrar, srv ReplayServiceServe s.RegisterService(&ReplayService_ServiceDesc, srv) } -func _ReplayService_ReplayDryRun_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReplayDryRunRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReplayServiceServer).ReplayDryRun(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/odpf.optimus.core.v1beta1.ReplayService/ReplayDryRun", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReplayServiceServer).ReplayDryRun(ctx, req.(*ReplayDryRunRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _ReplayService_Replay_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReplayRequest) if err := dec(in); err != nil { @@ -148,42 +88,6 @@ func _ReplayService_Replay_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } -func _ReplayService_GetReplayStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetReplayStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReplayServiceServer).GetReplayStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/odpf.optimus.core.v1beta1.ReplayService/GetReplayStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReplayServiceServer).GetReplayStatus(ctx, req.(*GetReplayStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} 
- -func _ReplayService_ListReplays_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListReplaysRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReplayServiceServer).ListReplays(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/odpf.optimus.core.v1beta1.ReplayService/ListReplays", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReplayServiceServer).ListReplays(ctx, req.(*ListReplaysRequest)) - } - return interceptor(ctx, in, info, handler) -} - // ReplayService_ServiceDesc is the grpc.ServiceDesc for ReplayService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -191,22 +95,10 @@ var ReplayService_ServiceDesc = grpc.ServiceDesc{ ServiceName: "odpf.optimus.core.v1beta1.ReplayService", HandlerType: (*ReplayServiceServer)(nil), Methods: []grpc.MethodDesc{ - { - MethodName: "ReplayDryRun", - Handler: _ReplayService_ReplayDryRun_Handler, - }, { MethodName: "Replay", Handler: _ReplayService_Replay_Handler, }, - { - MethodName: "GetReplayStatus", - Handler: _ReplayService_GetReplayStatus_Handler, - }, - { - MethodName: "ListReplays", - Handler: _ReplayService_ListReplays_Handler, - }, }, Streams: []grpc.StreamDesc{}, Metadata: "odpf/optimus/core/v1beta1/replay.proto", diff --git a/server/optimus.go b/server/optimus.go index 5a425c148f..af831e4f37 100644 --- a/server/optimus.go +++ b/server/optimus.go @@ -7,6 +7,7 @@ import ( "io" "net/http" "os" + "time" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/hashicorp/go-hclog" @@ -284,7 +285,16 @@ func (s *OptimusServer) setupHandlers() error { if err != nil { return err } - newJobRunService := schedulerService.NewJobRunService(s.logger, jobProviderRepo, jobRunRepo, operatorRunRepository, newScheduler, newPriorityResolver, jobInputCompiler) + + replayRepository := schedulerRepo.NewReplayRepository(s.dbPool) + replayWorker := schedulerService.NewReplayWorker(s.logger, replayRepository, newScheduler, jobProviderRepo, s.conf.Replay) + replayManager := schedulerService.NewReplayManager(s.logger, replayRepository, replayWorker, func() time.Time { + return time.Now().UTC() + }, s.conf.Replay) + replayValidator := schedulerService.NewValidator(replayRepository, newScheduler) + replayService := schedulerService.NewReplayService(replayRepository, jobProviderRepo, replayValidator) + + newJobRunService := schedulerService.NewJobRunService(s.logger, jobProviderRepo, jobRunRepo, replayRepository, operatorRunRepository, newScheduler, newPriorityResolver, jobInputCompiler) // Job Bounded Context Setup jJobRepo := jRepo.NewJobRepository(s.dbPool) @@ -313,6 +323,9 @@ func (s *OptimusServer) setupHandlers() error { // Core Job Handler pb.RegisterJobSpecificationServiceServer(s.grpcServer, jHandler.NewJobHandler(jJobService, s.logger)) + pb.RegisterReplayServiceServer(s.grpcServer, schedulerHandler.NewReplayHandler(s.logger, replayService)) + replayManager.Initialize() + s.cleanupFn = append(s.cleanupFn, func() { err = notificationService.Close() if err != nil { diff --git a/tests/setup/database.go b/tests/setup/database.go index 73df8cb51d..daaf5d4a13 100644 --- a/tests/setup/database.go +++ b/tests/setup/database.go @@ -132,7 +132,8 @@ func TruncateTablesWith(pool *pgxpool.Pool) { ctx := context.Background() pool.Exec(ctx, 
"TRUNCATE TABLE backup_old, resource_old CASCADE") pool.Exec(ctx, "TRUNCATE TABLE backup CASCADE") - pool.Exec(ctx, "TRUNCATE TABLE replay CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE replay_request CASCADE") + pool.Exec(ctx, "TRUNCATE TABLE replay_run CASCADE") pool.Exec(ctx, "TRUNCATE TABLE resource CASCADE") pool.Exec(ctx, "TRUNCATE TABLE job_run CASCADE")