Get Tokens Per Second metric #1

Open · wants to merge 5 commits into master
8 changes: 8 additions & 0 deletions backend/backend.proto
@@ -19,6 +19,8 @@ service Backend {
rpc SoundGeneration(SoundGenerationRequest) returns (Result) {}
rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {}
rpc Status(HealthMessage) returns (StatusResponse) {}
// expose tokens_per_second metric
rpc GetMetrics(HealthMessage) returns (MetricsResponse);

rpc StoresSet(StoresSetOptions) returns (Result) {}
rpc StoresDelete(StoresDeleteOptions) returns (Result) {}
@@ -28,6 +30,12 @@ service Backend {
rpc Rerank(RerankRequest) returns (RerankResult) {}
}

message MetricsResponse {
  float tokens_per_second = 1;
  int32 tokens_generated = 2;
  int32 prompt_tokens_processed = 3;
}

message RerankRequest {
string query = 1;
repeated string documents = 2;
34 changes: 33 additions & 1 deletion backend/cpp/llama/grpc-server.cpp
@@ -441,6 +441,16 @@ struct llama_server_context
}
}

llama_client_slot* get_active_slot() {
    for (llama_client_slot& slot : slots) {
        // Check if the slot is currently processing
        if (slot.is_processing()) {
            return &slot; // Return the active slot
        }
    }
    return nullptr; // No active slot found
}

bool load_model(const gpt_params &params_)
{
params = params_;
@@ -2031,9 +2041,10 @@ inline void signal_handler(int signal) { shutdown_handler(signal); }
bool loaded_model; // TODO: add a mutex for this, but happens only once loading the model

// The class has a llama instance that is shared across all RPCs
-llama_server_context llama;
+static llama_server_context llama;

static void start_llama_server() {

// Wait for model to be loaded first
while (!loaded_model) {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
@@ -2406,6 +2417,27 @@ class BackendServiceImpl final : public backend::Backend::Service {

return grpc::Status::OK;
}

grpc::Status GetMetrics(ServerContext* context, const backend::HealthMessage* request, backend::MetricsResponse* response) {
    llama_client_slot* active_slot = llama.get_active_slot();

    if (active_slot != nullptr) {
        // Calculate the tokens per second using existing logic
        double tokens_per_second = 1e3 / active_slot->t_token_generation * active_slot->n_decoded;

        // Populate the response with metrics
        response->set_tokens_per_second(tokens_per_second);
        response->set_tokens_generated(active_slot->n_decoded);
        response->set_prompt_tokens_processed(active_slot->num_prompt_tokens_processed);
    } else {
        // Handle case when no active slot exists
        response->set_tokens_per_second(0);
        response->set_tokens_generated(0);
        response->set_prompt_tokens_processed(0);
    }

    return grpc::Status::OK;
}
};

void RunServer(const std::string& server_address) {
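For context on the formula above: in the llama.cpp server code, t_token_generation accumulates generation time in milliseconds and n_decoded counts generated tokens, so `1e3 / t_token_generation * n_decoded` is simply tokens divided by elapsed seconds. A minimal sketch of the same calculation in Go (the names here are illustrative, not part of the patch):

package metrics

// TokensPerSecond mirrors the server-side calculation: nDecoded tokens
// generated over tTokenGenerationMs milliseconds of decode time.
func TokensPerSecond(nDecoded int, tTokenGenerationMs float64) float64 {
    if tTokenGenerationMs == 0 {
        return 0 // nothing decoded yet; avoid division by zero
    }
    return 1e3 / tTokenGenerationMs * float64(nDecoded)
}

// Example: 128 tokens decoded in 3200 ms -> TokensPerSecond(128, 3200) == 40.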
1 change: 1 addition & 0 deletions pkg/grpc/backend.go
@@ -51,4 +51,5 @@ type Backend interface {
StoresFind(ctx context.Context, in *pb.StoresFindOptions, opts ...grpc.CallOption) (*pb.StoresFindResult, error)

Rerank(ctx context.Context, in *pb.RerankRequest, opts ...grpc.CallOption) (*pb.RerankResult, error)
GetTokenMetrics(ctx context.Context) (*pb.MetricsResponse, error)
}
26 changes: 26 additions & 0 deletions pkg/grpc/client.go
@@ -50,6 +50,32 @@ func (c *Client) wdUnMark() {
}
}

func (c *Client) GetTokenMetrics(ctx context.Context) (*pb.MetricsResponse, error) {
    if !c.parallel {
        c.opMutex.Lock()
        defer c.opMutex.Unlock()
    }
    c.setBusy(true)
    defer c.setBusy(false)
    conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        return nil, err
    }
    defer conn.Close()
    client := pb.NewBackendClient(conn)

    // The metrics call shouldn't take a long time
    ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()

    res, err := client.GetMetrics(ctx, &pb.HealthMessage{})
    if err != nil {
        return nil, err
    }

    return res, nil
}

func (c *Client) HealthCheck(ctx context.Context) (bool, error) {
if !c.parallel {
c.opMutex.Lock()
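As a usage sketch (not part of this diff), a caller holding the Backend interface from pkg/grpc/backend.go could poll the new method after a request and log throughput. The import path and the protoc-generated getters on MetricsResponse are assumptions based on standard protoc-gen-go output and the repository layout:

package example

import (
    "context"
    "log"
    "time"

    grpcpkg "github.com/mudler/LocalAI/pkg/grpc" // assumed module path
)

// LogThroughput asks the backend for its current token metrics and logs them.
// It relies only on the GetTokenMetrics method added to the Backend interface
// in this change and on the generated getters of pb.MetricsResponse.
func LogThroughput(ctx context.Context, b grpcpkg.Backend) {
    ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
    defer cancel()

    m, err := b.GetTokenMetrics(ctx)
    if err != nil {
        log.Printf("token metrics unavailable: %v", err)
        return
    }
    log.Printf("generated %d tokens (%d prompt tokens) at %.2f tok/s",
        m.GetTokensGenerated(), m.GetPromptTokensProcessed(), m.GetTokensPerSecond())
}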
4 changes: 4 additions & 0 deletions pkg/grpc/embed.go
@@ -23,6 +23,10 @@ func (e *embedBackend) HealthCheck(ctx context.Context) (bool, error) {
return true, nil
}

func (e *embedBackend) GetTokenMetrics(ctx context.Context) (*pb.MetricsResponse, error) {
return e.s.GetMetrics(ctx, &pb.HealthMessage{})
}

func (e *embedBackend) Embeddings(ctx context.Context, in *pb.PredictOptions, opts ...grpc.CallOption) (*pb.EmbeddingResult, error) {
return e.s.Embedding(ctx, in)
}