[opt](routine load) optimize routine load timeout logic (#40818)
If IO/CPU resources are tight, a routine load task is likely to time out. The current approach is self-adaptive
backoff (#32227), but it spends effort probing for a suitable timeout instead of doing useful work. When resources
are tight, a better way to handle a routine load task is to let it finish executing rather than retry it. Therefore,
this PR increases the timeout so that a task always finishes, even if it runs slowly when resources are tight.
sollhui committed Sep 23, 2024
1 parent 76d62c4 commit 2e4fd59
Showing 8 changed files with 21 additions and 66 deletions.
@@ -1198,6 +1198,12 @@ public class Config extends ConfigBase {
@ConfField(mutable = true, masterOnly = true)
public static int max_routine_load_task_num_per_be = 1024;

+ /**
+ * routine load timeout is equal to maxBatchIntervalS * routine_load_task_timeout_multiplier.
+ */
+ @ConfField(mutable = true, masterOnly = true)
+ public static int routine_load_task_timeout_multiplier = 10;

/**
* the max timeout of get kafka meta.
*/
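
As a rough worked example (assuming, purely for illustration, a maxBatchIntervalS of 10 seconds), the default multiplier of 10 yields a task timeout of 10 s * 10 = 100 s, whereas the old logic always started from 10 s * 2 = 20 s and doubled it on each timeout.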
@@ -226,7 +226,8 @@ public void divideRoutineLoadJob(int currentConcurrentTaskNum) throws UserExcept
((KafkaProgress) progress).getOffsetByPartition(kafkaPartition));
}
KafkaTaskInfo kafkaTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), id,
- maxBatchIntervalS * 2 * 1000, 0, taskKafkaProgress, isMultiTable());
+ maxBatchIntervalS * Config.routine_load_task_timeout_multiplier * 1000,
+ taskKafkaProgress, isMultiTable());
routineLoadTaskInfoList.add(kafkaTaskInfo);
result.add(kafkaTaskInfo);
}
@@ -50,16 +50,14 @@ public class KafkaTaskInfo extends RoutineLoadTaskInfo {
private Map<Integer, Long> partitionIdToOffset;

public KafkaTaskInfo(UUID id, long jobId,
- long timeoutMs, int timeoutBackOffCount,
- Map<Integer, Long> partitionIdToOffset, boolean isMultiTable) {
- super(id, jobId, timeoutMs, timeoutBackOffCount, isMultiTable);
+ long timeoutMs, Map<Integer, Long> partitionIdToOffset, boolean isMultiTable) {
+ super(id, jobId, timeoutMs, isMultiTable);
this.partitionIdToOffset = partitionIdToOffset;
}

public KafkaTaskInfo(KafkaTaskInfo kafkaTaskInfo, Map<Integer, Long> partitionIdToOffset, boolean isMultiTable) {
super(UUID.randomUUID(), kafkaTaskInfo.getJobId(),
- kafkaTaskInfo.getTimeoutMs(), kafkaTaskInfo.getTimeoutBackOffCount(),
- kafkaTaskInfo.getBeId(), isMultiTable);
+ kafkaTaskInfo.getTimeoutMs(), kafkaTaskInfo.getBeId(), isMultiTable);
this.partitionIdToOffset = partitionIdToOffset;
this.isEof = kafkaTaskInfo.getIsEof();
}
@@ -140,11 +138,6 @@ private TExecPlanFragmentParams rePlan(RoutineLoadJob routineLoadJob) throws Use
TExecPlanFragmentParams tExecPlanFragmentParams = routineLoadJob.plan(planner, loadId, txnId);
TPlanFragment tPlanFragment = tExecPlanFragmentParams.getFragment();
tPlanFragment.getOutputSink().getOlapTableSink().setTxnId(txnId);
- // it needs update timeout to make task timeout backoff work
- long timeoutS = this.getTimeoutMs() / 1000;
- tPlanFragment.getOutputSink().getOlapTableSink().setLoadChannelTimeoutS(timeoutS);
- tExecPlanFragmentParams.getQueryOptions().setQueryTimeout((int) timeoutS);
- tExecPlanFragmentParams.getQueryOptions().setExecutionTimeout((int) timeoutS);

if (Config.enable_workload_group) {
long wgId = routineLoadJob.getWorkloadId();
@@ -739,18 +739,6 @@ public void processTimeoutTasks() {
// and after renew, the previous task is removed from routineLoadTaskInfoList,
// so task can no longer be committed successfully.
// the already committed task will not be handled here.
- int timeoutBackOffCount = routineLoadTaskInfo.getTimeoutBackOffCount();
- if (timeoutBackOffCount > RoutineLoadTaskInfo.MAX_TIMEOUT_BACK_OFF_COUNT) {
- try {
- updateState(JobState.PAUSED, new ErrorReason(InternalErrorCode.TIMEOUT_TOO_MUCH,
- "task " + routineLoadTaskInfo.getId() + " timeout too much"), false);
- } catch (UserException e) {
- LOG.warn("update job state to pause failed", e);
- }
- return;
- }
- routineLoadTaskInfo.setTimeoutBackOffCount(timeoutBackOffCount + 1);
- routineLoadTaskInfo.setTimeoutMs((routineLoadTaskInfo.getTimeoutMs() << 1));
RoutineLoadTaskInfo newTask = unprotectRenewTask(routineLoadTaskInfo);
Env.getCurrentEnv().getRoutineLoadTaskScheduler().addTaskInQueue(newTask);
}
@@ -73,28 +73,23 @@ public abstract class RoutineLoadTaskInfo {

protected boolean isMultiTable = false;

- protected static final int MAX_TIMEOUT_BACK_OFF_COUNT = 3;
- protected int timeoutBackOffCount = 0;

protected boolean isEof = false;

// this status will be set when corresponding transaction's status is changed.
// so that user or other logic can know the status of the corresponding txn.
protected TransactionStatus txnStatus = TransactionStatus.UNKNOWN;

- public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs,
- int timeoutBackOffCount, boolean isMultiTable) {
+ public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs, boolean isMultiTable) {
this.id = id;
this.jobId = jobId;
this.createTimeMs = System.currentTimeMillis();
this.timeoutMs = timeoutMs;
- this.timeoutBackOffCount = timeoutBackOffCount;
this.isMultiTable = isMultiTable;
}

- public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs, int timeoutBackOffCount,
- long previousBeId, boolean isMultiTable) {
- this(id, jobId, timeoutMs, timeoutBackOffCount, isMultiTable);
+ public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs, long previousBeId,
+ boolean isMultiTable) {
+ this(id, jobId, timeoutMs, isMultiTable);
this.previousBeId = previousBeId;
}

@@ -138,10 +133,6 @@ public void setLastScheduledTime(long lastScheduledTime) {
this.lastScheduledTime = lastScheduledTime;
}

- public void setTimeoutMs(long timeoutMs) {
- this.timeoutMs = timeoutMs;
- }

public long getTimeoutMs() {
return timeoutMs;
}
@@ -154,14 +145,6 @@ public TransactionStatus getTxnStatus() {
return txnStatus;
}

- public void setTimeoutBackOffCount(int timeoutBackOffCount) {
- this.timeoutBackOffCount = timeoutBackOffCount;
- }
-
- public int getTimeoutBackOffCount() {
- return timeoutBackOffCount;
- }

public boolean getIsEof() {
return isEof;
}
@@ -173,33 +156,17 @@ public boolean isTimeout() {
}

if (isRunning() && System.currentTimeMillis() - executeStartTimeMs > timeoutMs) {
LOG.info("task {} is timeout. start: {}, timeout: {}, timeoutBackOffCount: {}", DebugUtil.printId(id),
executeStartTimeMs, timeoutMs, timeoutBackOffCount);
LOG.info("task {} is timeout. start: {}, timeout: {}", DebugUtil.printId(id),
executeStartTimeMs, timeoutMs);
return true;
}
return false;
}

public void handleTaskByTxnCommitAttachment(RLTaskTxnCommitAttachment rlTaskTxnCommitAttachment) {
- selfAdaptTimeout(rlTaskTxnCommitAttachment);
judgeEof(rlTaskTxnCommitAttachment);
}

- private void selfAdaptTimeout(RLTaskTxnCommitAttachment rlTaskTxnCommitAttachment) {
- long taskExecutionTime = rlTaskTxnCommitAttachment.getTaskExecutionTimeMs();
- long timeoutMs = this.timeoutMs;
-
- while (this.timeoutBackOffCount > 0) {
- timeoutMs = timeoutMs >> 1;
- if (timeoutMs <= taskExecutionTime) {
- this.timeoutMs = timeoutMs << 1;
- return;
- }
- this.timeoutBackOffCount--;
- }
- this.timeoutMs = timeoutMs;
- }

private void judgeEof(RLTaskTxnCommitAttachment rlTaskTxnCommitAttachment) {
RoutineLoadJob routineLoadJob = routineLoadManager.getJob(jobId);
if (rlTaskTxnCommitAttachment.getTotalRows() < routineLoadJob.getMaxBatchRows()
@@ -225,7 +225,7 @@ public void testProcessTimeOutTasks(@Injectable GlobalTransactionMgr globalTrans
Map<Integer, Long> partitionIdsToOffset = Maps.newHashMap();
partitionIdsToOffset.put(100, 0L);
KafkaTaskInfo kafkaTaskInfo = new KafkaTaskInfo(new UUID(1, 1), 1L,
- maxBatchIntervalS * 2 * 1000, 0, partitionIdsToOffset, false);
+ maxBatchIntervalS * 2 * 1000, partitionIdsToOffset, false);
kafkaTaskInfo.setExecuteStartTimeMs(System.currentTimeMillis() - maxBatchIntervalS * 2 * 1000 - 1);
routineLoadTaskInfoList.add(kafkaTaskInfo);

@@ -68,7 +68,7 @@ public void testRunOneCycle(@Injectable KafkaRoutineLoadJob kafkaRoutineLoadJob1
Deencapsulation.setField(kafkaProgress, "partitionIdToOffset", partitionIdToOffset);

LinkedBlockingDeque<RoutineLoadTaskInfo> routineLoadTaskInfoQueue = new LinkedBlockingDeque<>();
- KafkaTaskInfo routineLoadTaskInfo1 = new KafkaTaskInfo(new UUID(1, 1), 1L, 20000, 0,
+ KafkaTaskInfo routineLoadTaskInfo1 = new KafkaTaskInfo(new UUID(1, 1), 1L, 20000,
partitionIdToOffset, false);
routineLoadTaskInfoQueue.addFirst(routineLoadTaskInfo1);

@@ -322,7 +322,7 @@ public void testCommitRoutineLoadTransaction(@Injectable TabletCommitInfo tablet
List<RoutineLoadTaskInfo> routineLoadTaskInfoList = Deencapsulation.getField(routineLoadJob, "routineLoadTaskInfoList");
Map<Integer, Long> partitionIdToOffset = Maps.newHashMap();
partitionIdToOffset.put(1, 0L);
- KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000, 0,
+ KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000,
partitionIdToOffset, false);
Deencapsulation.setField(routineLoadTaskInfo, "txnId", 1L);
routineLoadTaskInfoList.add(routineLoadTaskInfo);
@@ -396,7 +396,7 @@ public void testCommitRoutineLoadTransactionWithErrorMax(@Injectable TabletCommi
List<RoutineLoadTaskInfo> routineLoadTaskInfoList = Deencapsulation.getField(routineLoadJob, "routineLoadTaskInfoList");
Map<Integer, Long> partitionIdToOffset = Maps.newHashMap();
partitionIdToOffset.put(1, 0L);
- KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000, 0,
+ KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000,
partitionIdToOffset, false);
Deencapsulation.setField(routineLoadTaskInfo, "txnId", 1L);
routineLoadTaskInfoList.add(routineLoadTaskInfo);