Update doc and tests
Fridge003 committed Jan 31, 2025
1 parent 70d98a7 commit fe5296a
Showing 3 changed files with 38 additions and 20 deletions.
3 changes: 3 additions & 0 deletions benchmark/lora/lora_bench.py
@@ -257,6 +257,9 @@ async def benchmark(
"Output token throughput (tok/s):", metrics.output_throughput
)
)
print(
"{:<40} {:<10.2f}".format("Total throughput (tok/s):", metrics.total_throughput)
)
print("{s:{c}^{n}}".format(s="End-to-End Latency", n=50, c="-"))
print(
"{:<40} {:<10.2f}".format("Mean E2E Latency (ms):", metrics.mean_e2e_latency_ms)
1 change: 1 addition & 0 deletions docs/backend/server_arguments.md
@@ -124,6 +124,7 @@ Please consult the documentation below to learn more about the parameters you ma

* `lora_paths`: You may provide a list of LoRA adapters to your model. Each batch element will get a model response with the corresponding LoRA adapter applied. Currently `cuda_graph` and `radix_attention` are not supported with this option, so you need to disable them manually. We are still working through these [issues](https://github.com/sgl-project/sglang/issues/2929).
* `max_loras_per_batch`: Maximum number of LoRAs in a running batch, including the base model.
* `lora_backend`: The backend used to run GEMM kernels for LoRA modules; can be `triton` or `flashinfer`. Defaults to `triton`. A launch command combining these options is sketched after this list.
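
For illustration only, a launch command that combines the three options above might look roughly like the sketch below. It is not part of this diff: the flag spellings are assumed from the parameter names documented here, the model and adapter IDs are borrowed from the repository's LoRA tests, and the two `--disable-*` flags stand in for manually disabling CUDA graph and radix attention as noted under `lora_paths`.

```bash
# Hedged sketch, not from this commit: flag names are assumed from the
# documented parameter names; the model and adapter IDs are placeholders.
python -m sglang.launch_server \
  --model-path meta-llama/Llama-2-7b-hf \
  --lora-paths winddude/wizardLM-LlaMA-LoRA-7B \
  --max-loras-per-batch 4 \
  --lora-backend triton \
  --disable-cuda-graph \
  --disable-radix-cache
```

Switching `--lora-backend` to `flashinfer` would select the FlashInfer GEMM kernels instead of the default Triton ones.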

## Kernel backend

54 changes: 34 additions & 20 deletions test/srt/models/test_lora_backend.py
@@ -18,6 +18,7 @@
import torch

from sglang.test.runners import HFRunner, SRTRunner
from sglang.test.test_utils import calculate_rouge_l

LORA_SETS = [
{"base": "meta-llama/Llama-2-7b-hf", "loras": ["winddude/wizardLM-LlaMA-LoRA-7B"]},
@@ -26,27 +27,24 @@
TORCH_DTYPES = [torch.float16]

PROMPTS = [
"AI is a field of computer science focused on",
"""
### Instruction:
Write a poem about the transformers Python library.
Mention the word "large language models" in that poem.
### Response:
The Transformers are large language models,
They're used to make predictions on text.
""",
"""
### Instruction:
Tell me about llamas and alpacas
### Response:
Llamas are large, long-necked animals with a woolly coat. They have two toes on each foot instead of three like other camelids (camels, dromedaries). Llamas live in the Andean mountains of South America where they graze on grasses and shrubs. Alpaca is another name for domesticated llama. The word "alpaca" comes from an Incan language meaning "golden fleece." Alpacas look very similar to llamas but are smaller than their wild relatives. Both species were used by ancient people as pack animals and for meat. Today both llamas and alpacas are raised primarily for their fiber which can be spun into yarn or knitted into clothing.
### Question 2:
What do you know about llamas?
### Answer:
""",
]

BACKENDS = ["triton", "flashinfer"]

# Elementwise |HF - SRT| logprob differences must stay below these bounds,
# and output strings must reach at least rouge_l_tolerance in ROUGE-L.
prefill_tolerance: float = 5e-2
decode_tolerance: float = 5e-2
rouge_l_tolerance: float = 1


class TestLoRABackend(unittest.TestCase):

@@ -129,6 +127,13 @@ def run_backend(
"max input diff between hf_lora and hf_base",
torch.max(abs(hf_logprobs - hf_no_lora_logprobs)),
)
if hf_logprobs.shape[0] <= 100:
assert torch.all(abs(hf_logprobs - srt_logprobs) < prefill_tolerance), (
f"prefill logprobs are not all close with model_path={base_path},"
f"lora_path={batch_lora_paths[i]}, backend={backend}, prompt={prompts[i]}"
f"prefill_tolerance={prefill_tolerance}."
f"{hf_logprobs=}, {srt_logprobs=}"
)

# compare output logprobs
hf_logprobs = torch.Tensor(hf_outputs.top_output_logprobs[i])
@@ -138,15 +143,24 @@
    torch.max(abs(hf_logprobs - srt_logprobs)),
    "\n",
)
if hf_logprobs.shape[0] <= 100:
    assert torch.all(abs(hf_logprobs - srt_logprobs) < decode_tolerance), (
        f"decode logprobs are not all close with model_path={base_path}, "
        f"lora_path={batch_lora_paths[i]}, backend={backend}, prompt={prompts[i]}, "
        f"decode_tolerance={decode_tolerance}. "
        f"{hf_logprobs=}, {srt_logprobs=}"
    )

# compare output strings
srt_output_str = srt_outputs.output_strs[i]
srt_output_str = srt_outputs.output_strs[i].strip(" ")
hf_output_str = hf_outputs.output_strs[i]
print(f"srt_output_str={srt_output_str}")
print(f"hf_output_str={hf_output_str}")
print(f"srt_no_lora_output_str={srt_no_lora_outputs.output_strs[i]}")
print(f"hf_no_lora_output_str={hf_no_lora_outputs.output_strs[i]}")
assert srt_output_str == hf_output_str
rouge_l_scores = calculate_rouge_l([srt_output_str], [hf_output_str])
print(f"{rouge_l_scores=}")
assert (
    rouge_l_scores[0] >= rouge_l_tolerance
), f"ROUGE-L score of prompt {i} output is below rouge_l_tolerance={rouge_l_tolerance}"

def test_all(self):
for lora_set in LORA_SETS: