Skip to content

Commit

Permalink
Add --network-mode option
Browse files Browse the repository at this point in the history
Signed-off-by: Joshua Stone <[email protected]>
  • Loading branch information
rhjostone committed Feb 1, 2025
1 parent adc3bbe commit ed4f322
Show file tree
Hide file tree
Showing 7 changed files with 47 additions and 1 deletion.
3 changes: 3 additions & 0 deletions docs/ramalama-bench.1.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,9 @@ URL support means if a model is on a web site or even on your local system, you
#### **--help**, **-h**
show this help message and exit

#### **--network-mode**=*none*
set the network mode for the container

## DESCRIPTION
Benchmark specified AI Model.

Expand Down
3 changes: 3 additions & 0 deletions docs/ramalama-convert.1.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,9 @@ type of OCI Model Image to convert.
| car | Includes base image with the model stored in a /models subdir |
| raw | Only the model and a link file model.file to it stored at / |

#### **--network-mode**=*none*
set the configuration for network namespaces when handling RUN instructions

## EXAMPLE

Generate an oci model out of an Ollama model.
Expand Down
3 changes: 3 additions & 0 deletions docs/ramalama-run.1.md
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,9 @@ llama.cpp explains this as:
#### **--tls-verify**=*true*
require HTTPS and verify certificates when contacting OCI registries

#### **--network-mode**=*none*
set the network mode for the container

## DESCRIPTION
Run specified AI Model as a chat bot. RamaLama pulls specified AI Model from
registry if it does not exist in local storage. By default a prompt for a chat
Expand Down
3 changes: 3 additions & 0 deletions docs/ramalama-serve.1.md
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,9 @@ IP address for llama.cpp to listen on.
#### **--name**, **-n**
Name of the container to run the Model in.

#### **--network-mode**=*default*
set the network mode for the container

#### **--port**, **-p**
port for AI Model server to listen on

Expand Down
33 changes: 33 additions & 0 deletions ramalama/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -379,6 +379,12 @@ def bench_cli(args):

def bench_parser(subparsers):
    """Register the ``bench`` subcommand (alias ``benchmark``) on *subparsers*.

    Wires up the benchmark CLI: one required positional MODEL argument, an
    optional --network-mode flag, and dispatch to ``bench_cli``.
    """
    bench = subparsers.add_parser(
        "bench",
        aliases=["benchmark"],
        help="benchmark specified AI Model",
    )
    # Benchmarks run with networking disabled by default; callers may pass
    # any container-engine-supported mode through this flag.
    bench.add_argument(
        "--network-mode",
        type=str,
        default="none",
        help="set the network mode for the container",
    )
    # Positional: the model to benchmark.
    bench.add_argument("MODEL")
    bench.set_defaults(func=bench_cli)

Expand Down Expand Up @@ -600,6 +606,13 @@ def convert_parser(subparsers):
Model "car" includes base image with the model stored in a /models subdir.
Model "raw" contains the model and a link file model.file to it stored at /.""",
)
# https://docs.podman.io/en/latest/markdown/podman-build.1.html#network-mode-net
parser.add_argument(
"--network-mode",
type=str,
default="none",
help="sets the configuration for network namespaces when handling RUN instructions",
)
parser.add_argument("SOURCE") # positional argument
parser.add_argument("TARGET") # positional argument
parser.set_defaults(func=convert_cli)
Expand Down Expand Up @@ -717,6 +730,15 @@ def _run(parser):
def run_parser(subparsers):
parser = subparsers.add_parser("run", help="run specified AI Model as a chatbot")
_run(parser)
# Disable network access by default, and give the option to pass any supported network mode into
# podman if needed:
# https://docs.podman.io/en/latest/markdown/podman-run.1.html#network-mode-net
parser.add_argument(
"--network-mode",
type=str,
default="none",
help="set the network mode for the container",
)
parser.add_argument("MODEL") # positional argument
parser.add_argument(
"ARGS", nargs="*", help="Overrides the default prompt, and the output is returned without entering the chatbot"
Expand All @@ -742,6 +764,17 @@ def serve_parser(subparsers):
parser.add_argument(
"-p", "--port", default=config.get('port', "8080"), help="port for AI Model server to listen on"
)
# --network-mode=default lets the container listen on localhost, and is an option that's compatible
# with podman and docker. It should use the bridge driver for rootful podman, the pasta driver for
# rootless podman, and the bridge driver for docker:
# https://docs.podman.io/en/latest/markdown/podman-run.1.html#network-mode-net
# https://docs.docker.com/engine/network/#drivers
parser.add_argument(
"--network-mode",
type=str,
default="default",
help="set the network mode for the container",
)
parser.add_argument("MODEL") # positional argument
parser.set_defaults(func=serve_cli)

Expand Down
1 change: 1 addition & 0 deletions ramalama/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,6 +153,7 @@ def setup_container(self, args):
"-i",
"--label",
"RAMALAMA",
f"--network={args.network_mode}",
"--security-opt=label=disable",
"--name",
name,
Expand Down
2 changes: 1 addition & 1 deletion ramalama/oci.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ def build(self, source, target, args):
else:
c.write(model_raw)
imageid = (
run_cmd([self.conman, "build", "--no-cache", "-q", "-f", containerfile.name, contextdir], debug=args.debug)
run_cmd([self.conman, "build", "--no-cache", f"--network={args.network_mode}", "-q", "-f", containerfile.name, contextdir], debug=args.debug)
.stdout.decode("utf-8")
.strip()
)
Expand Down

0 comments on commit ed4f322

Please sign in to comment.