From 2079e43beecc486a607c9d79ab691e0e4563aa11 Mon Sep 17 00:00:00 2001
From: Sebastian Schoennenbeck
Date: Tue, 28 Jan 2025 11:56:45 +0100
Subject: [PATCH] [Core] Make raw_request optional in ServingCompletion (#12503)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Sebastian Schönnenbeck
---
 vllm/entrypoints/openai/serving_completion.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py
index b0179f78bd635..13c3926368890 100644
--- a/vllm/entrypoints/openai/serving_completion.py
+++ b/vllm/entrypoints/openai/serving_completion.py
@@ -58,7 +58,7 @@ def __init__(
     async def create_completion(
         self,
         request: CompletionRequest,
-        raw_request: Request,
+        raw_request: Optional[Request] = None,
     ) -> Union[AsyncGenerator[str, None], CompletionResponse,
                ErrorResponse]:
         """Completion API similar to OpenAI's API.
@@ -137,7 +137,7 @@ async def create_completion(
                  lora_request=lora_request,
                  prompt_adapter_request=prompt_adapter_request)
 
-                trace_headers = (await
+                trace_headers = (None if raw_request is None else await
                                  self._get_trace_headers(raw_request.headers))
 
         if isinstance(sampling_params, BeamSearchParams):
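
For context, a minimal, self-contained sketch of the calling pattern this change enables: invoking the completion handler from in-process code that has no HTTP request object, with the trace-header lookup guarded exactly as in the patch. TinyCompletionHandler and FakeRequest are hypothetical stand-ins for illustration only, not vLLM's actual classes.

    # Sketch only: simplified stand-ins, not vLLM's real ServingCompletion.
    import asyncio
    from typing import Mapping, Optional


    class FakeRequest:
        """Stand-in for an HTTP request; only exposes `.headers`."""

        def __init__(self, headers: Mapping[str, str]):
            self.headers = headers


    class TinyCompletionHandler:
        async def _get_trace_headers(
                self, headers: Mapping[str, str]) -> Optional[Mapping[str, str]]:
            # A real implementation would extract tracing headers here.
            return {k: v for k, v in headers.items() if k.startswith("x-trace")}

        async def create_completion(
            self,
            prompt: str,
            raw_request: Optional[FakeRequest] = None,
        ) -> dict:
            # Same guard as the patch: only touch raw_request.headers when a
            # request object was actually provided.
            trace_headers = (None if raw_request is None else await
                             self._get_trace_headers(raw_request.headers))
            return {"prompt": prompt, "trace_headers": trace_headers}


    async def main() -> None:
        handler = TinyCompletionHandler()
        # HTTP path: a request object is available, so trace headers are read.
        with_request = await handler.create_completion(
            "Hello", FakeRequest({"x-trace-id": "abc123"}))
        # In-process path: no request object, trace headers stay None.
        without_request = await handler.create_completion("Hello")
        print(with_request, without_request)


    if __name__ == "__main__":
        asyncio.run(main())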