[opt](routine load) optimize routine load timeout logic (#40818) #41135

Merged · 1 commit · Sep 25, 2024
@@ -1198,6 +1198,12 @@ public class Config extends ConfigBase {
     @ConfField(mutable = true, masterOnly = true)
     public static int max_routine_load_task_num_per_be = 1024;
 
+    /**
+     * routine load timeout is equal to maxBatchIntervalS * routine_load_task_timeout_multiplier.
+     */
+    @ConfField(mutable = true, masterOnly = true)
+    public static int routine_load_task_timeout_multiplier = 10;
+
     /**
      * the max timeout of get kafka meta.
      */
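Note on the new config: the multiplier above replaces the hard-coded factor of 2 that was previously used to compute a routine load task's timeout (see the divideRoutineLoadJob hunk below). A rough worked example, with an assumed max_batch_interval of 10 seconds that is not taken from the patch:

    // illustrative values only; maxBatchIntervalS comes from the job's max_batch_interval property
    long maxBatchIntervalS = 10;
    long timeoutMs = maxBatchIntervalS * Config.routine_load_task_timeout_multiplier * 1000;
    // with the default multiplier of 10: 10 * 10 * 1000 = 100000 ms (100 s);
    // the old formula maxBatchIntervalS * 2 * 1000 would have given 20000 ms (20 s)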
@@ -226,7 +226,8 @@ public void divideRoutineLoadJob(int currentConcurrentTaskNum) throws UserException
                         ((KafkaProgress) progress).getOffsetByPartition(kafkaPartition));
             }
             KafkaTaskInfo kafkaTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), id,
-                    maxBatchIntervalS * 2 * 1000, 0, taskKafkaProgress, isMultiTable());
+                    maxBatchIntervalS * Config.routine_load_task_timeout_multiplier * 1000,
+                    taskKafkaProgress, isMultiTable());
             routineLoadTaskInfoList.add(kafkaTaskInfo);
             result.add(kafkaTaskInfo);
         }
@@ -50,16 +50,14 @@ public class KafkaTaskInfo extends RoutineLoadTaskInfo {
     private Map<Integer, Long> partitionIdToOffset;
 
     public KafkaTaskInfo(UUID id, long jobId,
-            long timeoutMs, int timeoutBackOffCount,
-            Map<Integer, Long> partitionIdToOffset, boolean isMultiTable) {
-        super(id, jobId, timeoutMs, timeoutBackOffCount, isMultiTable);
+            long timeoutMs, Map<Integer, Long> partitionIdToOffset, boolean isMultiTable) {
+        super(id, jobId, timeoutMs, isMultiTable);
         this.partitionIdToOffset = partitionIdToOffset;
     }
 
     public KafkaTaskInfo(KafkaTaskInfo kafkaTaskInfo, Map<Integer, Long> partitionIdToOffset, boolean isMultiTable) {
         super(UUID.randomUUID(), kafkaTaskInfo.getJobId(),
-                kafkaTaskInfo.getTimeoutMs(), kafkaTaskInfo.getTimeoutBackOffCount(),
-                kafkaTaskInfo.getBeId(), isMultiTable);
+                kafkaTaskInfo.getTimeoutMs(), kafkaTaskInfo.getBeId(), isMultiTable);
         this.partitionIdToOffset = partitionIdToOffset;
         this.isEof = kafkaTaskInfo.getIsEof();
     }
@@ -140,11 +138,6 @@ private TExecPlanFragmentParams rePlan(RoutineLoadJob routineLoadJob) throws UserException
         TExecPlanFragmentParams tExecPlanFragmentParams = routineLoadJob.plan(planner, loadId, txnId);
         TPlanFragment tPlanFragment = tExecPlanFragmentParams.getFragment();
         tPlanFragment.getOutputSink().getOlapTableSink().setTxnId(txnId);
-        // it needs update timeout to make task timeout backoff work
-        long timeoutS = this.getTimeoutMs() / 1000;
-        tPlanFragment.getOutputSink().getOlapTableSink().setLoadChannelTimeoutS(timeoutS);
-        tExecPlanFragmentParams.getQueryOptions().setQueryTimeout((int) timeoutS);
-        tExecPlanFragmentParams.getQueryOptions().setExecutionTimeout((int) timeoutS);
 
         if (Config.enable_workload_group) {
             long wgId = routineLoadJob.getWorkloadId();
@@ -739,18 +739,6 @@ public void processTimeoutTasks() {
                 // and after renew, the previous task is removed from routineLoadTaskInfoList,
                 // so task can no longer be committed successfully.
                 // the already committed task will not be handled here.
-                int timeoutBackOffCount = routineLoadTaskInfo.getTimeoutBackOffCount();
-                if (timeoutBackOffCount > RoutineLoadTaskInfo.MAX_TIMEOUT_BACK_OFF_COUNT) {
-                    try {
-                        updateState(JobState.PAUSED, new ErrorReason(InternalErrorCode.TIMEOUT_TOO_MUCH,
-                                "task " + routineLoadTaskInfo.getId() + " timeout too much"), false);
-                    } catch (UserException e) {
-                        LOG.warn("update job state to pause failed", e);
-                    }
-                    return;
-                }
-                routineLoadTaskInfo.setTimeoutBackOffCount(timeoutBackOffCount + 1);
-                routineLoadTaskInfo.setTimeoutMs((routineLoadTaskInfo.getTimeoutMs() << 1));
                 RoutineLoadTaskInfo newTask = unprotectRenewTask(routineLoadTaskInfo);
                 Env.getCurrentEnv().getRoutineLoadTaskScheduler().addTaskInQueue(newTask);
             }
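With the back-off branch removed, processTimeoutTasks no longer doubles the timeout or pauses the job after repeated timeouts; a timed-out task is always renewed and rescheduled. A minimal sketch of the remaining per-task flow, assuming the surrounding loop still gates on routineLoadTaskInfo.isTimeout() (locking and iteration omitted):

    // sketch only: renewing drops the old task from routineLoadTaskInfoList,
    // so the timed-out task can no longer commit, and a fresh task is queued instead
    if (routineLoadTaskInfo.isTimeout()) {
        RoutineLoadTaskInfo newTask = unprotectRenewTask(routineLoadTaskInfo);
        Env.getCurrentEnv().getRoutineLoadTaskScheduler().addTaskInQueue(newTask);
    }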
@@ -73,28 +73,23 @@ public abstract class RoutineLoadTaskInfo {
 
     protected boolean isMultiTable = false;
 
-    protected static final int MAX_TIMEOUT_BACK_OFF_COUNT = 3;
-    protected int timeoutBackOffCount = 0;
-
     protected boolean isEof = false;
 
     // this status will be set when corresponding transaction's status is changed.
     // so that user or other logic can know the status of the corresponding txn.
     protected TransactionStatus txnStatus = TransactionStatus.UNKNOWN;
 
-    public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs,
-            int timeoutBackOffCount, boolean isMultiTable) {
+    public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs, boolean isMultiTable) {
         this.id = id;
         this.jobId = jobId;
         this.createTimeMs = System.currentTimeMillis();
         this.timeoutMs = timeoutMs;
-        this.timeoutBackOffCount = timeoutBackOffCount;
         this.isMultiTable = isMultiTable;
     }
 
-    public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs, int timeoutBackOffCount,
-            long previousBeId, boolean isMultiTable) {
-        this(id, jobId, timeoutMs, timeoutBackOffCount, isMultiTable);
+    public RoutineLoadTaskInfo(UUID id, long jobId, long timeoutMs, long previousBeId,
+            boolean isMultiTable) {
+        this(id, jobId, timeoutMs, isMultiTable);
         this.previousBeId = previousBeId;
     }
 
@@ -138,10 +133,6 @@ public void setLastScheduledTime(long lastScheduledTime) {
         this.lastScheduledTime = lastScheduledTime;
     }
 
-    public void setTimeoutMs(long timeoutMs) {
-        this.timeoutMs = timeoutMs;
-    }
-
     public long getTimeoutMs() {
         return timeoutMs;
     }
@@ -154,14 +145,6 @@ public TransactionStatus getTxnStatus() {
         return txnStatus;
     }
 
-    public void setTimeoutBackOffCount(int timeoutBackOffCount) {
-        this.timeoutBackOffCount = timeoutBackOffCount;
-    }
-
-    public int getTimeoutBackOffCount() {
-        return timeoutBackOffCount;
-    }
-
     public boolean getIsEof() {
         return isEof;
     }
@@ -173,33 +156,17 @@ public boolean isTimeout() {
         }
 
         if (isRunning() && System.currentTimeMillis() - executeStartTimeMs > timeoutMs) {
-            LOG.info("task {} is timeout. start: {}, timeout: {}, timeoutBackOffCount: {}", DebugUtil.printId(id),
-                    executeStartTimeMs, timeoutMs, timeoutBackOffCount);
+            LOG.info("task {} is timeout. start: {}, timeout: {}", DebugUtil.printId(id),
+                    executeStartTimeMs, timeoutMs);
             return true;
         }
         return false;
     }
 
     public void handleTaskByTxnCommitAttachment(RLTaskTxnCommitAttachment rlTaskTxnCommitAttachment) {
-        selfAdaptTimeout(rlTaskTxnCommitAttachment);
         judgeEof(rlTaskTxnCommitAttachment);
     }
 
-    private void selfAdaptTimeout(RLTaskTxnCommitAttachment rlTaskTxnCommitAttachment) {
-        long taskExecutionTime = rlTaskTxnCommitAttachment.getTaskExecutionTimeMs();
-        long timeoutMs = this.timeoutMs;
-
-        while (this.timeoutBackOffCount > 0) {
-            timeoutMs = timeoutMs >> 1;
-            if (timeoutMs <= taskExecutionTime) {
-                this.timeoutMs = timeoutMs << 1;
-                return;
-            }
-            this.timeoutBackOffCount--;
-        }
-        this.timeoutMs = timeoutMs;
-    }
-
     private void judgeEof(RLTaskTxnCommitAttachment rlTaskTxnCommitAttachment) {
         RoutineLoadJob routineLoadJob = routineLoadManager.getJob(jobId);
         if (rlTaskTxnCommitAttachment.getTotalRows() < routineLoadJob.getMaxBatchRows()
@@ -225,7 +225,7 @@ public void testProcessTimeOutTasks(@Injectable GlobalTransactionMgr globalTrans
         Map<Integer, Long> partitionIdsToOffset = Maps.newHashMap();
         partitionIdsToOffset.put(100, 0L);
         KafkaTaskInfo kafkaTaskInfo = new KafkaTaskInfo(new UUID(1, 1), 1L,
-                maxBatchIntervalS * 2 * 1000, 0, partitionIdsToOffset, false);
+                maxBatchIntervalS * 2 * 1000, partitionIdsToOffset, false);
         kafkaTaskInfo.setExecuteStartTimeMs(System.currentTimeMillis() - maxBatchIntervalS * 2 * 1000 - 1);
         routineLoadTaskInfoList.add(kafkaTaskInfo);
 
@@ -68,7 +68,7 @@ public void testRunOneCycle(@Injectable KafkaRoutineLoadJob kafkaRoutineLoadJob1
         Deencapsulation.setField(kafkaProgress, "partitionIdToOffset", partitionIdToOffset);
 
         LinkedBlockingDeque<RoutineLoadTaskInfo> routineLoadTaskInfoQueue = new LinkedBlockingDeque<>();
-        KafkaTaskInfo routineLoadTaskInfo1 = new KafkaTaskInfo(new UUID(1, 1), 1L, 20000, 0,
+        KafkaTaskInfo routineLoadTaskInfo1 = new KafkaTaskInfo(new UUID(1, 1), 1L, 20000,
                 partitionIdToOffset, false);
         routineLoadTaskInfoQueue.addFirst(routineLoadTaskInfo1);
 
@@ -322,7 +322,7 @@ public void testCommitRoutineLoadTransaction(@Injectable TabletCommitInfo tablet
         List<RoutineLoadTaskInfo> routineLoadTaskInfoList = Deencapsulation.getField(routineLoadJob, "routineLoadTaskInfoList");
         Map<Integer, Long> partitionIdToOffset = Maps.newHashMap();
         partitionIdToOffset.put(1, 0L);
-        KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000, 0,
+        KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000,
                 partitionIdToOffset, false);
         Deencapsulation.setField(routineLoadTaskInfo, "txnId", 1L);
         routineLoadTaskInfoList.add(routineLoadTaskInfo);
@@ -396,7 +396,7 @@ public void testCommitRoutineLoadTransactionWithErrorMax(@Injectable TabletCommi
         List<RoutineLoadTaskInfo> routineLoadTaskInfoList = Deencapsulation.getField(routineLoadJob, "routineLoadTaskInfoList");
         Map<Integer, Long> partitionIdToOffset = Maps.newHashMap();
         partitionIdToOffset.put(1, 0L);
-        KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000, 0,
+        KafkaTaskInfo routineLoadTaskInfo = new KafkaTaskInfo(UUID.randomUUID(), 1L, 20000,
                 partitionIdToOffset, false);
         Deencapsulation.setField(routineLoadTaskInfo, "txnId", 1L);
         routineLoadTaskInfoList.add(routineLoadTaskInfo);