[FLINK-36192][autoscaler] Autoscaler supports adjusting the parallelism of source vertices based on the number of partitions in Kafka or Pulsar #879

Open
wants to merge 8 commits into base: main
@@ -50,6 +50,7 @@
import static org.apache.flink.autoscaler.config.AutoScalerOptions.VERTEX_MIN_PARALLELISM;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.EXPECTED_PROCESSING_RATE;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.MAX_PARALLELISM;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.NUM_SOURCE_PARTITIONS;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.PARALLELISM;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.TRUE_PROCESSING_RATE;
import static org.apache.flink.autoscaler.topology.ShipStrategy.HASH;
@@ -66,6 +67,14 @@ public class JobVertexScaler<KEY, Context extends JobAutoScalerContext<KEY>> {
protected static final String INEFFECTIVE_MESSAGE_FORMAT =
"Ineffective scaling detected for %s (expected increase: %s, actual increase %s). Blocking of ineffective scaling decisions is %s";

@VisibleForTesting protected static final String SCALING_LIMITED = "ScalingLimited";

@VisibleForTesting
protected static final String SCALE_LIMITED_MESSAGE_FORMAT =
"Scaling limited detected for %s (expected parallelism: %s, actual parallelism %s). "
+ "Scaling limited due to source partitions : %s,"
+ "upperBoundForAlignment(maxParallelism or parallelismUpperLimit): %s, parallelismLowerLimit: %s.";

private Clock clock = Clock.system(ZoneId.systemDefault());

private final AutoScalerEventHandler<KEY, Context> autoScalerEventHandler;
@@ -193,12 +202,16 @@ public ParallelismChange computeScaleTargetParallelism(

int newParallelism =
scale(
vertex,
currentParallelism,
inputShipStrategies,
(int) evaluatedMetrics.get(NUM_SOURCE_PARTITIONS).getCurrent(),
(int) evaluatedMetrics.get(MAX_PARALLELISM).getCurrent(),
scaleFactor,
Math.min(currentParallelism, conf.getInteger(VERTEX_MIN_PARALLELISM)),
Math.max(currentParallelism, conf.getInteger(VERTEX_MAX_PARALLELISM)));
Math.max(currentParallelism, conf.getInteger(VERTEX_MAX_PARALLELISM)),
autoScalerEventHandler,
context);

if (newParallelism == currentParallelism) {
// Clear delayed scale down request if the new parallelism is equal to
@@ -345,15 +358,22 @@ private boolean detectIneffectiveScaleUp(
* <p>Also, in order to ensure the data is evenly spread across subtasks, we try to adjust the
* parallelism for source and keyed vertex such that it divides the maxParallelism without a
* remainder.
*
* <p>This method also attempts to adjust the parallelism to ensure it aligns well with the
* number of source partitions if a vertex has a known source partition count.
*/
@VisibleForTesting
protected static int scale(
protected static <KEY, Context extends JobAutoScalerContext<KEY>> int scale(
JobVertexID vertex,
int currentParallelism,
Collection<ShipStrategy> inputShipStrategies,
int numSourcePartitions,
int maxParallelism,
double scaleFactor,
int parallelismLowerLimit,
int parallelismUpperLimit) {
int parallelismUpperLimit,
AutoScalerEventHandler<KEY, Context> eventHandler,
Context context) {
checkArgument(
parallelismLowerLimit <= parallelismUpperLimit,
"The parallelism lower limitation must not be greater than the parallelism upper limitation.");
@@ -383,23 +403,68 @@ protected static int scale(
// Apply min/max parallelism
newParallelism = Math.min(Math.max(parallelismLowerLimit, newParallelism), upperBound);

var adjustByMaxParallelism =
inputShipStrategies.isEmpty() || inputShipStrategies.contains(HASH);
if (!adjustByMaxParallelism) {
var adjustByMaxParallelismOrPartitions =
numSourcePartitions > 0 || inputShipStrategies.contains(HASH);
if (!adjustByMaxParallelismOrPartitions) {
return newParallelism;
}

// When the shuffle type of vertex inputs contains keyBy or vertex is a source, we try to
// adjust the parallelism such that it divides the maxParallelism without a remainder
// => data is evenly spread across subtasks
for (int p = newParallelism; p <= maxParallelism / 2 && p <= upperBound; p++) {
if (maxParallelism % p == 0) {
final int numKeyGroupsOrPartitions;
final int upperBoundForAlignment;
if (numSourcePartitions <= 0) {
numKeyGroupsOrPartitions = maxParallelism;
upperBoundForAlignment =
Math.min(
// Optimize the case where newParallelism <= maxParallelism / 2
newParallelism > maxParallelism / 2
? maxParallelism
: maxParallelism / 2,
upperBound);
} else {
numKeyGroupsOrPartitions = numSourcePartitions;
upperBoundForAlignment = Math.min(numSourcePartitions, upperBound);
}
Comment on lines +412 to +426
Member:
Optimize the case where newParallelism <= maxParallelism / 2

Why do we need this optimization? To reduce the number of iterations of the for loop?

I'm curious why the source-partition case doesn't use this optimization. If both the source-partition and key-group cases could use it, would the following code work?

Suggested change (replacing the if/else block above with):
var numKeyGroupsOrPartitions = numSourcePartitions <= 0 ? maxParallelism : numSourcePartitions;
var upperBoundForAlignment =
Math.min(
// Optimize the case where newParallelism <= maxParallelism / 2
newParallelism > numKeyGroupsOrPartitions / 2
? numKeyGroupsOrPartitions
: numKeyGroupsOrPartitions / 2,
upperBound);
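
For context on the "why maxParallelism / 2" question above: no integer strictly between n / 2 and n divides n, so once the candidate parallelism exceeds half of the key-group count, the only remaining even divisor would be the key-group count itself. A tiny standalone check of that property (hypothetical helper, not part of the PR or of Flink):

// Verifies that n has no divisor strictly between n / 2 and n.
public class DivisorBoundCheck {
    public static void main(String[] args) {
        int n = 200; // e.g. the key-group count (maxParallelism)
        for (int p = n / 2 + 1; p < n; p++) {
            if (n % p == 0) {
                throw new AssertionError("unexpected divisor " + p);
            }
        }
        System.out.println("no divisor of " + n + " strictly between " + (n / 2) + " and " + n);
    }
}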


// When the shuffle type of vertex inputs contains keyBy or vertex is a source,
// we try to adjust the parallelism such that it divides
// the adjustableMaxParallelism without a remainder => data is evenly spread across subtasks
Member:
Suggested change (replacing the comment line above):
// the numKeyGroupsOrPartitions without a remainder => data is evenly spread across subtasks

for (int p = newParallelism; p <= upperBoundForAlignment; p++) {
if (numKeyGroupsOrPartitions % p == 0) {
Member:
About this comment #879 (comment), I'm wondering whether the following change would be more reasonable.

Note: numKeyGroupsOrPartitions / p means how many source partitions or key groups each subtask consumes.

Suggested change (replacing the condition above):
if (numKeyGroupsOrPartitions % p == 0 || numKeyGroupsOrPartitions / p < numKeyGroupsOrPartitions / newParallelism) {

For example: maxParallelism is 200, and the new parallelism is 60 (some subtasks consume 4 key groups, the rest consume 3).

  • The final parallelism is 100 based on the main-branch code, because we only return p when maxParallelism % p == 0.
  • But I think 67 is more reasonable here (one subtask consumes 2 key groups, and each of the remaining 66 subtasks consumes 3).

@mxm @gyfora , WDYT?

Also, it's a bit beyond the scope of this PR; I could file a separate PR if you think it makes sense. Of course, it's also acceptable to do it in this PR.
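
A quick trace of the 200-key-group example (illustrative arithmetic only, not part of the PR): none of 60..66 divides 200, so the main-branch loop keeps scanning until 100, while the extra condition already accepts 67 because 200 / 67 = 2 is smaller than 200 / 60 = 3.

// Hypothetical standalone check of the suggested condition; names are illustrative.
public class SuggestedConditionExample {
    public static void main(String[] args) {
        int maxParallelism = 200;
        int newParallelism = 60;
        for (int p = newParallelism; p <= maxParallelism; p++) {
            boolean evenDivisor = maxParallelism % p == 0; // main-branch condition
            boolean lighterLoad = // reviewer's additional condition
                    maxParallelism / p < maxParallelism / newParallelism;
            if (evenDivisor || lighterLoad) {
                System.out.println(p); // prints 67; with evenDivisor alone it would print 100
                break;
            }
        }
    }
}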

return p;
}
}

// If parallelism adjustment fails, use originally computed parallelism
return newParallelism;
// When rounding the parallelism up cannot make it evenly divide numKeyGroupsOrPartitions, try
// to find the smallest parallelism that can still satisfy the current consumption rate.
int p = newParallelism;
for (; p > 0; p--) {
if (numKeyGroupsOrPartitions / p > numKeyGroupsOrPartitions / newParallelism) {
if (numKeyGroupsOrPartitions % p != 0) {
p++;
}
break;
}
}
Comment on lines +432 to +448
Member:
I found that our discussion doesn't cover all cases while reviewing this part in detail.

For example: sourcePartition is 199, and the new parallelism is 99. IIUC, the final parallelism is 67 (every subtask consumes 3 source partitions, except for the last subtask), right?

But 100 as the final parallelism makes more sense to me (every subtask consumes 2 source partitions, except for the last subtask).

Member:
Following up on #879 (comment): I found the current logic isn't perfect even when sourcePartitionNumber is 200.
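
To make the 199-partition example above concrete, here is a standalone trace of the downward search added in this PR, assuming the preceding upward divisor search found nothing below its upper bound (illustrative only, not the PR's code):

// Traces the downward search for numSourcePartitions = 199, newParallelism = 99.
public class DownwardSearchExample {
    public static void main(String[] args) {
        int numSourcePartitions = 199;
        int newParallelism = 99; // 199 / 99 = 2 partitions per subtask (floor)
        int p = newParallelism;
        for (; p > 0; p--) {
            if (numSourcePartitions / p > numSourcePartitions / newParallelism) {
                // First true at p = 66 (199 / 66 = 3); since 199 % 66 != 0, step back up to 67.
                if (numSourcePartitions % p != 0) {
                    p++;
                }
                break;
            }
        }
        System.out.println(p); // prints 67, matching the reading above; 100 would spread partitions more evenly
    }
}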


p = Math.max(p, parallelismLowerLimit);
var message =
String.format(
SCALE_LIMITED_MESSAGE_FORMAT,
vertex,
newParallelism,
p,
numSourcePartitions,
upperBound,
parallelismLowerLimit);
eventHandler.handleEvent(
context,
AutoScalerEventHandler.Type.Warning,
SCALING_LIMITED,
message,
SCALING_LIMITED + vertex + (scaleFactor * currentParallelism),
context.getConfiguration().get(SCALING_EVENT_INTERVAL));
return p;
}
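
For readers skimming this hunk, here is a minimal standalone sketch of the alignment logic it introduces, with the scaling-limited event emission omitted. The names mirror the diff, but this is an illustration that assumes newParallelism has already been clamped to [parallelismLowerLimit, upperBound]; it is not the PR's exact code:

// Illustrative sketch of the alignment step above (event emission omitted).
static int alignParallelism(
        int newParallelism,
        int numSourcePartitions, // > 0 only when the source partition count is known
        int maxParallelism, // number of key groups
        int upperBound,
        int parallelismLowerLimit) {
    final int numKeyGroupsOrPartitions;
    final int upperBoundForAlignment;
    if (numSourcePartitions <= 0) {
        numKeyGroupsOrPartitions = maxParallelism;
        upperBoundForAlignment =
                Math.min(
                        newParallelism > maxParallelism / 2 ? maxParallelism : maxParallelism / 2,
                        upperBound);
    } else {
        numKeyGroupsOrPartitions = numSourcePartitions;
        upperBoundForAlignment = Math.min(numSourcePartitions, upperBound);
    }
    // Search upwards for a parallelism that divides the key groups / partitions without remainder.
    for (int p = newParallelism; p <= upperBoundForAlignment; p++) {
        if (numKeyGroupsOrPartitions % p == 0) {
            return p;
        }
    }
    // Otherwise search downwards for the smallest parallelism whose per-subtask load does not
    // exceed the load at newParallelism.
    int p = newParallelism;
    for (; p > 0; p--) {
        if (numKeyGroupsOrPartitions / p > numKeyGroupsOrPartitions / newParallelism) {
            if (numKeyGroupsOrPartitions % p != 0) {
                p++;
            }
            break;
        }
    }
    return Math.max(p, parallelismLowerLimit);
}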

@VisibleForTesting
@@ -204,7 +204,7 @@ protected JobTopology getJobTopology(

Set<JobVertexID> vertexSet = Set.copyOf(t.getVerticesInTopologicalOrder());
updateVertexList(stateStore, ctx, clock.instant(), vertexSet);
updateKafkaPulsarSourceMaxParallelisms(ctx, jobDetailsInfo.getJobId(), t);
updateKafkaPulsarSourceNumPartitions(ctx, jobDetailsInfo.getJobId(), t);
excludeVerticesFromScaling(ctx.getConfiguration(), t.getFinishedVertices());
return t;
}
@@ -249,7 +249,7 @@ protected JobTopology getJobTopology(JobDetailsInfo jobDetailsInfo) {
json, slotSharingGroupIdMap, maxParallelismMap, metrics, finished);
}

private void updateKafkaPulsarSourceMaxParallelisms(
private void updateKafkaPulsarSourceNumPartitions(
Context ctx, JobID jobId, JobTopology topology) throws Exception {
try (var restClient = ctx.getRestClusterClient()) {
Pattern partitionRegex =
@@ -284,7 +284,7 @@ private void updateKafkaPulsarSourceMaxParallelisms(
"Updating source {} max parallelism based on available partitions to {}",
sourceVertex,
numPartitions);
topology.get(sourceVertex).updateMaxParallelism((int) numPartitions);
topology.get(sourceVertex).setNumSourcePartitions((int) numPartitions);
}
}
}
@@ -55,6 +55,7 @@
import static org.apache.flink.autoscaler.metrics.ScalingMetric.MANAGED_MEMORY_USED;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.MAX_PARALLELISM;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.METASPACE_MEMORY_USED;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.NUM_SOURCE_PARTITIONS;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.NUM_TASK_SLOTS_USED;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.OBSERVED_TPR;
import static org.apache.flink.autoscaler.metrics.ScalingMetric.PARALLELISM;
@@ -166,6 +167,11 @@ private Map<ScalingMetric, EvaluatedScalingMetric> evaluateMetrics(

evaluatedMetrics.put(
MAX_PARALLELISM, EvaluatedScalingMetric.of(vertexInfo.getMaxParallelism()));

evaluatedMetrics.put(
NUM_SOURCE_PARTITIONS,
EvaluatedScalingMetric.of(vertexInfo.getNumSourcePartitions()));

computeProcessingRateThresholds(evaluatedMetrics, conf, processingBacklog, restartTime);
return evaluatedMetrics;
}
@@ -53,6 +53,9 @@ public enum ScalingMetric {

/** Job vertex max parallelism. */
MAX_PARALLELISM(false),

/** Source vertex partition count. */
NUM_SOURCE_PARTITIONS(false),
/** Upper boundary of the target data rate range. */
SCALE_UP_RATE_THRESHOLD(false),

@@ -101,6 +104,7 @@
PARALLELISM,
RECOMMENDED_PARALLELISM,
MAX_PARALLELISM,
NUM_SOURCE_PARTITIONS,
SCALE_UP_RATE_THRESHOLD,
SCALE_DOWN_RATE_THRESHOLD,
EXPECTED_PROCESSING_RATE);
@@ -46,7 +46,7 @@ public class VertexInfo {
@Setter(AccessLevel.NONE)
private int maxParallelism;

private final int originalMaxParallelism;
@Setter private int numSourcePartitions;

private final boolean finished;

@@ -65,7 +65,6 @@ public VertexInfo(
this.inputs = inputs;
this.parallelism = parallelism;
this.maxParallelism = maxParallelism;
this.originalMaxParallelism = maxParallelism;
this.finished = finished;
this.ioMetrics = ioMetrics;
}
@@ -99,8 +98,4 @@ public VertexInfo(
int maxParallelism) {
this(id, inputs, parallelism, maxParallelism, null);
}

public void updateMaxParallelism(int maxParallelism) {
this.maxParallelism = Math.min(originalMaxParallelism, maxParallelism);
}
}