
Commit

Remove debug
lewtun committed Feb 21, 2025
1 parent e5fec7a commit f581741
Showing 2 changed files with 3 additions and 2 deletions.
3 changes: 3 additions & 0 deletions src/lighteval/main_vllm.py
@@ -144,6 +144,9 @@ def vllm(
metric_options = {}

model_args_dict: dict = {k.split("=")[0]: k.split("=")[1] if "=" in k else True for k in model_args.split(",")}

print(f"{model_args_dict=}")
print(f"{generation_parameters=}")
model_config = VLLMModelConfig(**model_args_dict, generation_parameters=generation_parameters)

pipeline = Pipeline(
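For context on the hunk above: the debug prints sit right where the comma-separated model_args string is parsed into the kwargs dict handed to VLLMModelConfig. A minimal, self-contained sketch of that parsing, with an illustrative argument string (the values below are examples, not taken from the commit); bare keys with no "=" become True flags:

    # Sketch of the dict comprehension used in main_vllm.py (example values only).
    model_args = "pretrained=HuggingFaceH4/zephyr-7b-beta,dtype=bfloat16,trust_remote_code"
    model_args_dict: dict = {
        k.split("=")[0]: k.split("=")[1] if "=" in k else True
        for k in model_args.split(",")
    }
    print(f"{model_args_dict=}")
    # model_args_dict={'pretrained': 'HuggingFaceH4/zephyr-7b-beta', 'dtype': 'bfloat16', 'trust_remote_code': True}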
2 changes: 0 additions & 2 deletions src/lighteval/models/vllm/vllm_model.py
@@ -318,7 +318,6 @@ def _generate(
generate: bool = True,
) -> list[GenerativeResponse]:
"""Contains the actual logic of the generation."""
print(f"{self.sampling_params.clone()=}")
sampling_params = self.sampling_params.clone() or SamplingParams()
if generate:
sampling_params.n = num_samples
@@ -344,7 +343,6 @@
# as VLLM complains about no GPUs available.
@ray.remote(num_gpus=1 if self.tensor_parallel_size == 1 else None)
def run_inference_one_model(model_args: dict, sampling_params: SamplingParams, requests):
print(f"Sampling params: {sampling_params}")
llm = LLM(**model_args)
return llm.generate(prompt_token_ids=requests, sampling_params=sampling_params)

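The print removed from _generate sat next to the clone-or-default line that stays in place. A rough sketch of that pattern, assuming vLLM is installed and that SamplingParams exposes clone() as it does in recent releases (the parameter values are illustrative):

    # Sketch of the clone-or-default pattern kept in vllm_model.py (example values only).
    from vllm import SamplingParams

    configured = SamplingParams(temperature=0.0, max_tokens=32)
    # Copy the configured params; the `or` falls back to default SamplingParams if the clone is falsy.
    sampling_params = configured.clone() or SamplingParams()
    sampling_params.n = 4  # request 4 samples per prompt without mutating `configured`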
