Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[FLINK-36192][autoscaler] Optimize the logic when partitions or key groups cannot be evenly distributed to subtasks #879

Merged
merged 10 commits into from
Oct 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@
import static org.apache.flink.autoscaler.config.AutoScalerOptions.VERTEX_MIN_PARALLELISM;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.EXPECTED_PROCESSING_RATE;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.MAX_PARALLELISM;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.NUM_SOURCE_PARTITIONS;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.PARALLELISM;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.TRUE_PROCESSING_RATE;
import static org.apache.flink.autoscaler.topology.ShipStrategy.HASH;
Expand All @@ -66,6 +67,14 @@ public class JobVertexScaler<KEY, Context extends JobAutoScalerContext<KEY>> {
protected static final String INEFFECTIVE_MESSAGE_FORMAT =
"Ineffective scaling detected for %s (expected increase: %s, actual increase %s). Blocking of ineffective scaling decisions is %s";

@VisibleForTesting protected static final String SCALING_LIMITED = "ScalingLimited";

@VisibleForTesting
protected static final String SCALE_LIMITED_MESSAGE_FORMAT =
"Scaling limited detected for %s (expected parallelism: %s, actual parallelism %s). "
+ "Scaling limited due to numKeyGroupsOrPartitions : %s,"
+ "upperBoundForAlignment(maxParallelism or parallelismUpperLimit): %s, parallelismLowerLimit: %s.";

private Clock clock = Clock.system(ZoneId.systemDefault());

private final AutoScalerEventHandler<KEY, Context> autoScalerEventHandler;
Expand Down Expand Up @@ -193,12 +202,16 @@ public ParallelismChange computeScaleTargetParallelism(

int newParallelism =
scale(
vertex,
currentParallelism,
inputShipStrategies,
(int) evaluatedMetrics.get(NUM_SOURCE_PARTITIONS).getCurrent(),
(int) evaluatedMetrics.get(MAX_PARALLELISM).getCurrent(),
scaleFactor,
Math.min(currentParallelism, conf.getInteger(VERTEX_MIN_PARALLELISM)),
Math.max(currentParallelism, conf.getInteger(VERTEX_MAX_PARALLELISM)));
Math.max(currentParallelism, conf.getInteger(VERTEX_MAX_PARALLELISM)),
autoScalerEventHandler,
context);

if (newParallelism == currentParallelism) {
// Clear delayed scale down request if the new parallelism is equal to
Expand Down Expand Up @@ -345,15 +358,22 @@ private boolean detectIneffectiveScaleUp(
* <p>Also, in order to ensure the data is evenly spread across subtasks, we try to adjust the
* parallelism for source and keyed vertex such that it divides the maxParallelism without a
* remainder.
*
* <p>This method also attempts to adjust the parallelism to ensure it aligns well with the
* number of source partitions if a vertex has a known source partition count.
*/
@VisibleForTesting
protected static int scale(
protected static <KEY, Context extends JobAutoScalerContext<KEY>> int scale(
JobVertexID vertex,
int currentParallelism,
Collection<ShipStrategy> inputShipStrategies,
int numSourcePartitions,
int maxParallelism,
double scaleFactor,
int parallelismLowerLimit,
int parallelismUpperLimit) {
int parallelismUpperLimit,
AutoScalerEventHandler<KEY, Context> eventHandler,
Context context) {
checkArgument(
parallelismLowerLimit <= parallelismUpperLimit,
"The parallelism lower limitation must not be greater than the parallelism upper limitation.");
Expand Down Expand Up @@ -383,23 +403,62 @@ protected static int scale(
// Apply min/max parallelism
newParallelism = Math.min(Math.max(parallelismLowerLimit, newParallelism), upperBound);

var adjustByMaxParallelism =
inputShipStrategies.isEmpty() || inputShipStrategies.contains(HASH);
if (!adjustByMaxParallelism) {
var adjustByMaxParallelismOrPartitions =
numSourcePartitions > 0 || inputShipStrategies.contains(HASH);
if (!adjustByMaxParallelismOrPartitions) {
return newParallelism;
1996fanrui marked this conversation as resolved.
Show resolved Hide resolved
}

// When the shuffle type of vertex inputs contains keyBy or vertex is a source, we try to
// adjust the parallelism such that it divides the maxParallelism without a remainder
// => data is evenly spread across subtasks
for (int p = newParallelism; p <= maxParallelism / 2 && p <= upperBound; p++) {
if (maxParallelism % p == 0) {
var numKeyGroupsOrPartitions =
numSourcePartitions <= 0 ? maxParallelism : numSourcePartitions;
var upperBoundForAlignment =
Math.min(
// Optimize the case where newParallelism <= maxParallelism / 2
newParallelism > numKeyGroupsOrPartitions / 2
? numKeyGroupsOrPartitions
: numKeyGroupsOrPartitions / 2,
upperBound);

// When the shuffle type of vertex inputs contains keyBy or vertex is a source,
// we try to adjust the parallelism such that it divides
// the numKeyGroupsOrPartitions without a remainder => data is evenly spread across subtasks
for (int p = newParallelism; p <= upperBoundForAlignment; p++) {
if (numKeyGroupsOrPartitions % p == 0) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

About this comment #879 (comment), I'm thinking whether the following change is more reasonable?

Note: numKeyGroupsOrPartitions / p means how many source partitions or key groups each subtask consumes.

Suggested change
if (numKeyGroupsOrPartitions % p == 0) {
if (numKeyGroupsOrPartitions % p == 0 || numKeyGroupsOrPartitions / p < numKeyGroupsOrPartitions / newParallelism) {

For example: maxParallelism is 200, and the new parallelism is 60. (Some subtasks consume 4 keyGroups, the rest of the subtasks consume 3 keyGroups.)

  • The final parallelism is 100 based on the main branch code due to we only return p when maxParallelism % p == 0.
  • But I think 67 is more reasonable here. (One subtask consumes 2 key groups; each of the remaining 66 subtasks consumes 3 key groups.)

@mxm @gyfora , WDYT?

Also, it's a bit beyond the scope of this PR. I could file a separate PR if you think it makes sense. Of course, it's acceptable to be done at this PR.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think that makes sense, but it makes the scaling more aggressive and less balanced. If we want to be more conservative, maybe 100 is ok in this scenario, where there is actually a divisor without a remainder. When there isn't, I think what you propose is way better than just using the initially provided parallelism.

In summary, I'm proposing to do a two-step process, similarly as for the partitions, where we first try to find a parallelism that divides the key groups without a remainder, and if that fails we do what you propose.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think that makes sense, but it makes the scaling more aggressive and less balanced. If we want to be more conservative, maybe 100 is ok in this scenario, where there is actually a divisor without a remainder. When there isn't, I think what you propose is way better than just using the initially provided parallelism.

In summary, I'm proposing to do a two-step process, similarly as for the partitions, where we first try to find a parallelism that divides the key groups without a remainder, and if that fails we do what you propose.

Thanks for the review, I agree with this, We can introduce an additional parameter to enable a more aggressive strategy: #879 (comment). But by default, a divisor of the number of partitions is still used to ensure balanced consumption.
WDYT? @mxm @1996fanrui

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That makes sense to me; we could introduce an additional parameter to enable a more aggressive strategy.

I wanna check with @huyuanfeng2018 and @mxm : The strategy will work for both source partition and key group, right? As I understand, we could unify the strategy for these 2 cases.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wanna check with @huyuanfeng2018 and @mxm : The strategy will work for both source partition and key group, right? As I understand, we could unify the strategy for these 2 cases.

I think we can unify strategies

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sounds good!

return p;
}
}

// If parallelism adjustment fails, use originally computed parallelism
return newParallelism;
// When the parallelism adjusted after rounding up does not evenly divide
// numKeyGroupsOrPartitions, try to find the smallest parallelism that can still satisfy
// the current consumption rate.
int p = newParallelism;
for (; p > 0; p--) {
if (numKeyGroupsOrPartitions / p > numKeyGroupsOrPartitions / newParallelism) {
if (numKeyGroupsOrPartitions % p != 0) {
p++;
}
break;
}
}
1996fanrui marked this conversation as resolved.
Show resolved Hide resolved

p = Math.max(p, parallelismLowerLimit);
var message =
String.format(
SCALE_LIMITED_MESSAGE_FORMAT,
vertex,
newParallelism,
p,
numKeyGroupsOrPartitions,
upperBound,
parallelismLowerLimit);
eventHandler.handleEvent(
context,
AutoScalerEventHandler.Type.Warning,
SCALING_LIMITED,
message,
SCALING_LIMITED + vertex + (scaleFactor * currentParallelism),
context.getConfiguration().get(SCALING_EVENT_INTERVAL));
return p;
}

@VisibleForTesting
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,7 @@ protected JobTopology getJobTopology(

Set<JobVertexID> vertexSet = Set.copyOf(t.getVerticesInTopologicalOrder());
updateVertexList(stateStore, ctx, clock.instant(), vertexSet);
updateKafkaPulsarSourceMaxParallelisms(ctx, jobDetailsInfo.getJobId(), t);
updateKafkaPulsarSourceNumPartitions(ctx, jobDetailsInfo.getJobId(), t);
excludeVerticesFromScaling(ctx.getConfiguration(), t.getFinishedVertices());
return t;
}
Expand Down Expand Up @@ -249,7 +249,7 @@ protected JobTopology getJobTopology(JobDetailsInfo jobDetailsInfo) {
json, slotSharingGroupIdMap, maxParallelismMap, metrics, finished);
}

private void updateKafkaPulsarSourceMaxParallelisms(
private void updateKafkaPulsarSourceNumPartitions(
Context ctx, JobID jobId, JobTopology topology) throws Exception {
try (var restClient = ctx.getRestClusterClient()) {
Pattern partitionRegex =
Expand Down Expand Up @@ -284,7 +284,7 @@ private void updateKafkaPulsarSourceMaxParallelisms(
"Updating source {} max parallelism based on available partitions to {}",
sourceVertex,
numPartitions);
topology.get(sourceVertex).updateMaxParallelism((int) numPartitions);
topology.get(sourceVertex).setNumSourcePartitions((int) numPartitions);
}
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@
import static org.apache.flink.autoscaler.metrics.ScalingMetric.MANAGED_MEMORY_USED;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.MAX_PARALLELISM;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.METASPACE_MEMORY_USED;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.NUM_SOURCE_PARTITIONS;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.NUM_TASK_SLOTS_USED;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.OBSERVED_TPR;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.PARALLELISM;
Expand Down Expand Up @@ -166,6 +167,11 @@ private Map<ScalingMetric, EvaluatedScalingMetric> evaluateMetrics(

evaluatedMetrics.put(
MAX_PARALLELISM, EvaluatedScalingMetric.of(vertexInfo.getMaxParallelism()));

evaluatedMetrics.put(
NUM_SOURCE_PARTITIONS,
EvaluatedScalingMetric.of(vertexInfo.getNumSourcePartitions()));

computeProcessingRateThresholds(evaluatedMetrics, conf, processingBacklog, restartTime);
return evaluatedMetrics;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,10 @@ public enum ScalingMetric {

/** Job vertex max parallelism. */
MAX_PARALLELISM(false),

/** Source vertex partition count. */
NUM_SOURCE_PARTITIONS(false),

/** Upper boundary of the target data rate range. */
SCALE_UP_RATE_THRESHOLD(false),

Expand Down Expand Up @@ -101,6 +105,7 @@ public enum ScalingMetric {
PARALLELISM,
RECOMMENDED_PARALLELISM,
MAX_PARALLELISM,
NUM_SOURCE_PARTITIONS,
SCALE_UP_RATE_THRESHOLD,
SCALE_DOWN_RATE_THRESHOLD,
EXPECTED_PROCESSING_RATE);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ public class VertexInfo {
@Setter(AccessLevel.NONE)
private int maxParallelism;

private final int originalMaxParallelism;
@Setter private int numSourcePartitions;

private final boolean finished;

Expand All @@ -65,7 +65,6 @@ public VertexInfo(
this.inputs = inputs;
this.parallelism = parallelism;
this.maxParallelism = maxParallelism;
this.originalMaxParallelism = maxParallelism;
this.finished = finished;
this.ioMetrics = ioMetrics;
}
Expand Down Expand Up @@ -99,8 +98,4 @@ public VertexInfo(
int maxParallelism) {
this(id, inputs, parallelism, maxParallelism, null);
}

public void updateMaxParallelism(int maxParallelism) {
this.maxParallelism = Math.min(originalMaxParallelism, maxParallelism);
}
}
Loading
Loading