Skip to content

Commit

Permalink
support QuickGELU (#3250)
Browse files — browse the repository at this point in the history
  • Loading branch information
zhyncs authored Feb 1, 2025
1 parent 4eb4b40 commit 8db776f
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 1 deletion.
9 changes: 9 additions & 0 deletions python/sglang/srt/layers/activation.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,15 @@ def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
return out


class QuickGELU(CustomOp):
    """QuickGELU activation: ``x * sigmoid(1.702 * x)``.

    A cheap sigmoid-based approximation of GELU (the form popularized by
    OpenAI CLIP); imported here by the Qwen2-VL vision model.
    """

    def forward_native(self, x: torch.Tensor) -> torch.Tensor:
        """Pure-PyTorch reference implementation."""
        return x * torch.sigmoid(x * 1.702)

    def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
        """CUDA path; currently just delegates to the native implementation.

        TODO(zhyncs): implement a fused QuickGELU kernel in sgl-kernel.
        """
        return self.forward_native(x)


class ScaledActivation(nn.Module):
"""An activation function with post-scale parameters.
Expand Down
2 changes: 1 addition & 1 deletion python/sglang/srt/models/qwen2_vl.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,10 @@
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from vllm.model_executor.layers.activation import QuickGELU

from sglang.srt.configs import Qwen2VLConfig, Qwen2VLVisionConfig
from sglang.srt.hf_transformers_utils import get_processor
from sglang.srt.layers.activation import QuickGELU
from sglang.srt.layers.attention.vision import VisionAttention
from sglang.srt.layers.linear import ColumnParallelLinear, RowParallelLinear
from sglang.srt.layers.logits_processor import LogitsProcessor
Expand Down

0 comments on commit 8db776f

Please sign in to comment.