Skip to content

Commit

Permalink
Reset the interruptSignal after acquiring the lock in completion and …
Browse files Browse the repository at this point in the history
chatCompletion. It was doing this in effect within asyncGenerate, and partially in generate, but the completion code would never call generate if interruptGenerate had ever been called in the past, because interruptSignal would be stuck at true. Also do it in a way that allows users to still call interruptGenerate during a context switch while acquiring the model lock.
  • Loading branch information
mitchellw committed Oct 4, 2024
1 parent f142da7 commit 95071e9
Showing 1 changed file with 12 additions and 0 deletions.
12 changes: 12 additions & 0 deletions src/engine.ts
Original file line number Diff line number Diff line change
Expand Up @@ -745,6 +745,8 @@ export class MLCEngine implements MLCEngineInterface {
async chatCompletion(
request: ChatCompletionRequest,
): Promise<AsyncIterable<ChatCompletionChunk> | ChatCompletion> {
const initialInterruptSignal = this.interruptSignal;

// 0. Check model loaded and preprocess inputs
const [selectedModelId, selectedPipeline, selectedChatConfig] =
this.getLLMStates("ChatCompletionRequest", request.model);
Expand All @@ -771,6 +773,10 @@ export class MLCEngine implements MLCEngineInterface {
// 0.5 Block wait until this pipeline finishes all previous requests
const lock = this.loadedModelIdToLock.get(selectedModelId)!;
await lock.acquire();
// If interruptGenerate was called while waiting for the lock, respect that; but reset the signal if it was left set by a previous call to interruptGenerate
if (initialInterruptSignal && this.interruptSignal) {
this.interruptSignal = false;
}

// 1. If request is streaming, return an AsyncIterable (an iterable version of `_generate()`)
if (request.stream) {
Expand Down Expand Up @@ -901,6 +907,8 @@ export class MLCEngine implements MLCEngineInterface {
async completion(
request: CompletionCreateParams,
): Promise<AsyncIterable<Completion> | Completion> {
const initialInterruptSignal = this.interruptSignal;

// 0. Check model loaded and preprocess inputs
const [selectedModelId, selectedPipeline, selectedChatConfig] =
this.getLLMStates("CompletionCreateParams", request.model);
Expand All @@ -920,6 +928,10 @@ export class MLCEngine implements MLCEngineInterface {
// 0.5 Block wait until this pipeline finishes all previous requests
const lock = this.loadedModelIdToLock.get(selectedModelId)!;
await lock.acquire();
// If interruptGenerate was called while waiting for the lock, respect that; but reset the signal if it was left set by a previous call to interruptGenerate
if (initialInterruptSignal && this.interruptSignal) {
this.interruptSignal = false;
}

// 1. If request is streaming, return an AsyncIterable (an iterable version of `_generate()`)
if (request.stream) {
Expand Down

0 comments on commit 95071e9

Please sign in to comment.