Test the case when max_new_tokens is very large (#1038)
merrymercy authored Aug 11, 2024
1 parent d785412 commit d84c5e7
Showing 7 changed files with 99 additions and 13 deletions.
4 changes: 1 addition & 3 deletions python/sglang/srt/managers/detokenizer_manager.py
@@ -32,7 +32,7 @@
)
from sglang.srt.managers.schedule_batch import FINISH_MATCHED_STR
from sglang.srt.server_args import PortArgs, ServerArgs
from sglang.utils import find_printable_text, get_exception_traceback, graceful_registry
from sglang.utils import find_printable_text, get_exception_traceback

asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

@@ -164,8 +164,6 @@ def start_detokenizer_process(
    port_args: PortArgs,
    pipe_writer,
):
    graceful_registry(inspect.currentframe().f_code.co_name)

    try:
        manager = DetokenizerManager(server_args, port_args)
    except Exception:
7 changes: 5 additions & 2 deletions python/sglang/srt/managers/policy_scheduler.py
@@ -15,6 +15,7 @@

"""Request policy scheduler"""

import os
import random
from collections import defaultdict
from contextlib import contextmanager
@@ -24,9 +25,11 @@
from sglang.srt.mem_cache.base_prefix_cache import BasePrefixCache
from sglang.srt.mem_cache.radix_cache import TreeNode

# Clip the max new tokens for the request whose max_new_tokens is very large.
# Clip the estimation of max_new_tokens for the request whose max_new_tokens is very large.
# This can prevent the server from being too conservative.
CLIP_MAX_NEW_TOKENS = 4096
# Note that this only clips the estimation in the scheduler but does not change the stop
# condition. The request can still generate tokens until it hits the unclipped max_new_tokens.
CLIP_MAX_NEW_TOKENS = int(os.environ.get("SGLANG_CLIP_MAX_NEW_TOKENS", "4096"))


class PolicyScheduler:
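The comment added above captures the key behavior of this change: the scheduler clips only its own estimate of how many more tokens a request may produce (and the clip value is now configurable through SGLANG_CLIP_MAX_NEW_TOKENS), while the stop condition still honors the request's full max_new_tokens. A minimal sketch of that distinction, assuming illustrative helper names (estimate_new_tokens, should_stop) rather than the scheduler's actual functions:

import os

# Same default and environment override as in the diff above.
CLIP_MAX_NEW_TOKENS = int(os.environ.get("SGLANG_CLIP_MAX_NEW_TOKENS", "4096"))

def estimate_new_tokens(requested_max_new_tokens: int) -> int:
    # Scheduler-side budget estimate only: a single huge request should not
    # make admission of other requests overly conservative.
    return min(requested_max_new_tokens, CLIP_MAX_NEW_TOKENS)

def should_stop(num_generated: int, requested_max_new_tokens: int) -> bool:
    # Generation still runs until the unclipped max_new_tokens is reached.
    return num_generated >= requested_max_new_tokens

The new test at the bottom of this commit sets SGLANG_CLIP_MAX_NEW_TOKENS=256 and a total token budget of 1024, so four requests that each ask for a huge number of new tokens can still be scheduled concurrently.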
4 changes: 2 additions & 2 deletions python/sglang/srt/openai_api/adapter.py
@@ -77,7 +77,7 @@ def __init__(self, filename: str, purpose: str):
batch_storage: Dict[str, BatchResponse] = {}
file_id_request: Dict[str, FileMetadata] = {}
file_id_response: Dict[str, FileResponse] = {}
# map file id to file path in SGlang backend
# map file id to file path in SGLang backend
file_id_storage: Dict[str, str] = {}


@@ -335,7 +335,7 @@ async def process_batch(tokenizer_manager, batch_id: str, batch_request: BatchRe
}

    except Exception as e:
        print("error in SGlang:", e)
        print("error in SGLang:", e)
        # Update batch status to "failed"
        retrieve_batch = batch_storage[batch_id]
        retrieve_batch.status = "failed"
2 changes: 1 addition & 1 deletion python/sglang/srt/server_args.py
@@ -64,7 +64,7 @@ class ServerArgs:

    # Other
    api_key: Optional[str] = None
    file_storage_pth: str = "SGlang_storage"
    file_storage_pth: str = "SGLang_storage"

    # Data parallelism
    dp_size: int = 1
13 changes: 12 additions & 1 deletion python/sglang/test/test_utils.py
@@ -398,6 +398,8 @@ def popen_launch_server(
    timeout: float,
    api_key: Optional[str] = None,
    other_args: tuple = (),
    env: Optional[dict] = None,
    return_stdout_stderr: bool = False,
):
    _, host, port = base_url.split(":")
    host = host[2:]
@@ -417,7 +419,16 @@
    if api_key:
        command += ["--api-key", api_key]

    process = subprocess.Popen(command, stdout=None, stderr=None)
    if return_stdout_stderr:
        process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env,
            text=True,
        )
    else:
        process = subprocess.Popen(command, stdout=None, stderr=None, env=env)

    start_time = time.time()
    while time.time() - start_time < timeout:
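The new env and return_stdout_stderr parameters let a test pass environment variables to the launched server and capture its output as text pipes instead of inheriting the parent's streams. A hedged usage sketch, mirroring the call in the new test file below (the model name, URL, and environment values are taken from that test, not requirements of the helper):

import os

from sglang.test.test_utils import DEFAULT_MODEL_NAME_FOR_TEST, popen_launch_server

process = popen_launch_server(
    DEFAULT_MODEL_NAME_FOR_TEST,
    "http://127.0.0.1:8157",
    timeout=300,
    env={"SGLANG_CLIP_MAX_NEW_TOKENS": "256", **os.environ},  # inherit, then override
    return_stdout_stderr=True,  # stderr becomes a readable text pipe
)

# Because stderr is piped, the server's scheduler logs can be scanned directly.
for line in iter(process.stderr.readline, ""):
    print(line, end="")
    if "#running-req:" in line:
        break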
10 changes: 6 additions & 4 deletions test/srt/run_suite.py
@@ -5,13 +5,15 @@

suites = {
"minimal": [
"test_chunked_prefill.py",
"test_embedding_openai_server.py",
"test_eval_accuracy.py",
"test_large_max_new_tokens.py",
"test_openai_server.py",
"test_vision_openai_server.py",
"test_embedding_openai_server.py",
"test_chunked_prefill.py",
"test_skip_tokenizer_init.py",
"test_torch_compile.py",
"test_models_from_modelscope.py",
"test_vision_openai_server.py",
"test_large_max_new_tokens.py",
"models/test_generation_models.py",
"models/test_embedding_models.py",
"sampling/penaltylib",
72 changes: 72 additions & 0 deletions test/srt/test_large_max_new_tokens.py
@@ -0,0 +1,72 @@
import json
import os
import time
import unittest
from concurrent.futures import ThreadPoolExecutor

import openai

from sglang.srt.hf_transformers_utils import get_tokenizer
from sglang.srt.utils import kill_child_process
from sglang.test.test_utils import DEFAULT_MODEL_NAME_FOR_TEST, popen_launch_server


class TestOpenAIServer(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.model = DEFAULT_MODEL_NAME_FOR_TEST
        cls.base_url = "http://127.0.0.1:8157"
        cls.api_key = "sk-123456"
        cls.process = popen_launch_server(
            cls.model,
            cls.base_url,
            timeout=300,
            api_key=cls.api_key,
            other_args=("--max-total-token", "1024"),
            env={"SGLANG_CLIP_MAX_NEW_TOKENS": "256", **os.environ},
            return_stdout_stderr=True,
        )
        cls.base_url += "/v1"
        cls.tokenizer = get_tokenizer(DEFAULT_MODEL_NAME_FOR_TEST)

    @classmethod
    def tearDownClass(cls):
        kill_child_process(cls.process.pid)

    def run_chat_completion(self):
        client = openai.Client(api_key=self.api_key, base_url=self.base_url)
        response = client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": "You are a helpful AI assistant"},
                {
                    "role": "user",
"content": "Please repeat the world 'hello' for 10000 times.",
                },
            ],
            temperature=0,
        )
        return response

    def test_chat_completion(self):
        num_requests = 4

        futures = []
        with ThreadPoolExecutor(16) as executor:
            for i in range(num_requests):
                futures.append(executor.submit(self.run_chat_completion))

        all_requests_running = False
        for line in iter(self.process.stderr.readline, ""):
            line = str(line)
            print(line, end="")
            if f"#running-req: {num_requests}" in line:
                all_requests_running = True
                break

        assert all_requests_running


if __name__ == "__main__":
    unittest.main()
