[Core] Make raw_request optional in ServingCompletion (vllm-project#12503)

Signed-off-by: Sebastian Schönnenbeck <[email protected]>
schoennenbeck authored Jan 28, 2025
1 parent e29d435 · commit 2079e43
Showing 1 changed file with 2 additions and 2 deletions.
vllm/entrypoints/openai/serving_completion.py (2 additions, 2 deletions)

@@ -58,7 +58,7 @@ def __init__(
     async def create_completion(
         self,
         request: CompletionRequest,
-        raw_request: Request,
+        raw_request: Optional[Request] = None,
     ) -> Union[AsyncGenerator[str, None], CompletionResponse, ErrorResponse]:
         """Completion API similar to OpenAI's API.

@@ -137,7 +137,7 @@ async def create_completion(
             lora_request=lora_request,
             prompt_adapter_request=prompt_adapter_request)

-        trace_headers = (await
+        trace_headers = (None if raw_request is None else await
                          self._get_trace_headers(raw_request.headers))

         if isinstance(sampling_params, BeamSearchParams):
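
With raw_request now defaulting to None, create_completion can be called without a Starlette Request object, e.g. when the serving layer is driven in-process (tests, embedded clients) rather than through the FastAPI route. Below is a minimal standalone sketch of the guard pattern the commit introduces; FakeRequest and Handler are illustrative stand-ins, not vLLM's actual classes.

import asyncio
from typing import Mapping, Optional


class FakeRequest:
    """Illustrative stand-in for starlette.requests.Request."""

    def __init__(self, headers: Mapping[str, str]) -> None:
        self.headers = headers


class Handler:
    """Illustrative stand-in for vLLM's completion handler."""

    async def _get_trace_headers(
            self, headers: Mapping[str, str]) -> Optional[Mapping[str, str]]:
        # The real method extracts tracing headers; echoing the input
        # is enough to demonstrate the control flow.
        return dict(headers)

    async def create_completion(
            self, raw_request: Optional[FakeRequest] = None) -> None:
        # The commit's guard: only await the header lookup when an HTTP
        # request is actually present, otherwise fall back to None.
        trace_headers = (None if raw_request is None else await
                         self._get_trace_headers(raw_request.headers))
        print("trace_headers:", trace_headers)


async def main() -> None:
    handler = Handler()
    # Called via HTTP: a real request with headers is available.
    await handler.create_completion(FakeRequest({"traceparent": "00-ab-cd-01"}))
    # Called in-process: before this commit, omitting the argument was a
    # TypeError; now the trace-header lookup is simply skipped.
    await handler.create_completion()


asyncio.run(main())

Note that a conditional expression may contain await inside an async function, which is what lets the guard stay a one-liner instead of an if/else block.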
