update prometheus config when scale in (#2387)
Yujie-Xie authored Mar 29, 2024
1 parent 16d4ac4 commit 21e4114
Showing 2 changed files with 63 additions and 20 deletions.
30 changes: 24 additions & 6 deletions pkg/cluster/manager/destroy.go
@@ -150,11 +150,6 @@ func (m *Manager) DestroyTombstone(
return err
}

// Destroy ignore error and force exec
gOpt.IgnoreConfigCheck = true
gOpt.Force = true
regenConfigTasks, _ := buildInitConfigTasks(m, name, topo, base, gOpt, nodes)

t := b.
Func("FindTomestoneNodes", func(ctx context.Context) (err error) {
if !skipConfirm {
@@ -172,9 +167,32 @@ func (m *Manager) DestroyTombstone(
ClusterOperate(cluster, operator.DestroyTombstoneOperation, gOpt, tlsCfg).
UpdateMeta(name, clusterMeta, nodes).
UpdateTopology(name, m.specManager.Path(name), clusterMeta, nodes).
Build()

if err := t.Execute(ctx); err != nil {
if errorx.Cast(err) != nil {
// FIXME: Map possible task errors and give suggestions.
return err
}
return perrs.Trace(err)
}

// Destroy ignore error and force exec
gOpt.IgnoreConfigCheck = true
gOpt.Force = true
// get new metadata
metadata, err = m.meta(name)
if err != nil &&
!errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) {
return err
}
topo = metadata.GetTopology()
base = metadata.GetBaseMeta()
regenConfigTasks, _ := buildInitConfigTasks(m, name, topo, base, gOpt, nodes)
t = b.
ParallelStep("+ Refresh instance configs", gOpt.Force, regenConfigTasks...).
ParallelStep("+ Reload prometheus and grafana", gOpt.Force,
buildReloadPromAndGrafanaTasks(metadata.GetTopology(), m.logger, gOpt)...).
buildReloadPromAndGrafanaTasks(topo, m.logger, gOpt)...).
Build()

if err := t.Execute(ctx); err != nil {
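Both files now follow the same shape: execute the destroy / scale-in tasks first, re-read the cluster metadata so the topology no longer lists the removed nodes, and only then regenerate instance configs and reload Prometheus and Grafana. Below is a minimal stand-alone sketch of that ordering; the types and helpers (loadMeta, runOperation, regenConfigsAndReloadMonitoring) are illustrative stand-ins, not tiup's actual APIs.

package main

import (
	"context"
	"fmt"
)

// Topology is a hypothetical stand-in for the cluster metadata tiup keeps on disk.
type Topology struct{ Nodes []string }

// loadMeta stands in for m.meta(name); after a scale-in it would no longer
// contain the removed nodes.
func loadMeta(name string) (*Topology, error) {
	return &Topology{Nodes: []string{"pd-1", "tikv-1"}}, nil
}

// runOperation stands in for executing the destroy-tombstone / scale-in task chain.
func runOperation(ctx context.Context, topo *Topology, removed []string) error {
	fmt.Println("operation executed, removed:", removed)
	return nil
}

// regenConfigsAndReloadMonitoring stands in for buildInitConfigTasks plus the
// "+ Reload prometheus and grafana" step; it must see the refreshed topology.
func regenConfigsAndReloadMonitoring(ctx context.Context, topo *Topology) error {
	fmt.Println("regenerating configs and reloading monitoring for:", topo.Nodes)
	return nil
}

func main() {
	ctx := context.Background()
	name := "demo-cluster"

	topo, err := loadMeta(name)
	if err != nil {
		panic(err)
	}

	// 1. Run the destructive tasks first.
	if err := runOperation(ctx, topo, []string{"tikv-1"}); err != nil {
		panic(err)
	}

	// 2. Re-read metadata so the topology reflects the removed nodes.
	topo, err = loadMeta(name)
	if err != nil {
		panic(err)
	}

	// 3. Only now regenerate configs and reload Prometheus/Grafana.
	if err := regenConfigsAndReloadMonitoring(ctx, topo); err != nil {
		panic(err)
	}
}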
53 changes: 39 additions & 14 deletions pkg/cluster/manager/scale_in.go
@@ -100,14 +100,6 @@ func (m *Manager) ScaleIn(

// Regenerate configuration
gOpt.IgnoreConfigCheck = true
regenConfigTasks, hasImported := buildInitConfigTasks(m, name, topo, base, gOpt, nodes)

// handle dir scheme changes
if hasImported {
if err := spec.HandleImportPathMigration(name); err != nil {
return err
}
}

tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir))
if err != nil {
@@ -118,20 +110,53 @@ func (m *Manager) ScaleIn(
if err != nil {
return err
}

scale(b, metadata, tlsCfg)
ctx := ctxt.New(
context.Background(),
gOpt.Concurrency,
m.logger,
)

if err := b.Build().Execute(ctx); err != nil {
if errorx.Cast(err) != nil {
// FIXME: Map possible task errors and give suggestions.
return err
}
return perrs.Trace(err)
}

// get new metadata
metadata, err = m.meta(name)
if err != nil &&
!errors.Is(perrs.Cause(err), meta.ErrValidate) &&
!errors.Is(perrs.Cause(err), spec.ErrMultipleTiSparkMaster) &&
!errors.Is(perrs.Cause(err), spec.ErrMultipleTisparkWorker) &&
!errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) {
// ignore conflict check error, node may be deployed by former version
// that lack of some certain conflict checks
return err
}

topo = metadata.GetTopology()
base = metadata.GetBaseMeta()

regenConfigTasks, hasImported := buildInitConfigTasks(m, name, topo, base, gOpt, nodes)
// handle dir scheme changes
if hasImported {
if err := spec.HandleImportPathMigration(name); err != nil {
return err
}
}
b, err = m.sshTaskBuilder(name, topo, base.User, gOpt)
if err != nil {
return err
}
t := b.
ParallelStep("+ Refresh instance configs", force, regenConfigTasks...).
ParallelStep("+ Reload prometheus and grafana", gOpt.Force,
buildReloadPromAndGrafanaTasks(metadata.GetTopology(), m.logger, gOpt, nodes...)...).
Build()

ctx := ctxt.New(
context.Background(),
gOpt.Concurrency,
m.logger,
)
if err := t.Execute(ctx); err != nil {
if errorx.Cast(err) != nil {
// FIXME: Map possible task errors and give suggestions.
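One detail worth noting in both hunks: when the metadata is re-read after the operation, a handful of known validation errors are deliberately tolerated (a node may have been deployed by an older version that lacked certain conflict checks), while any other error still aborts. A simplified sketch of that filter follows, using standard-library sentinels instead of tiup's spec/meta errors and skipping the perrs.Cause unwrapping the real code performs.

package main

import (
	"errors"
	"fmt"
)

// Illustrative sentinels; the real code checks spec.ErrNoTiSparkMaster,
// spec.ErrMultipleTiSparkMaster, spec.ErrMultipleTisparkWorker and meta.ErrValidate.
var (
	errValidate        = errors.New("validation failed")
	errNoTiSparkMaster = errors.New("no tispark master")
)

// loadMeta is a hypothetical stand-in that returns usable metadata together
// with a tolerable validation error.
func loadMeta(name string) (nodes []string, err error) {
	return []string{"pd-1", "tikv-2"}, errNoTiSparkMaster
}

func main() {
	nodes, err := loadMeta("demo-cluster")
	// Tolerate the known sentinels, abort on anything else; tiup additionally
	// unwraps with perrs.Cause(err) before calling errors.Is.
	if err != nil &&
		!errors.Is(err, errValidate) &&
		!errors.Is(err, errNoTiSparkMaster) {
		panic(err)
	}
	fmt.Println("continuing with nodes:", nodes)
}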
