diff --git a/docs/ramalama-bench.1.md b/docs/ramalama-bench.1.md
index 87d51acf..c94435f3 100644
--- a/docs/ramalama-bench.1.md
+++ b/docs/ramalama-bench.1.md
@@ -28,6 +28,9 @@ URL support means if a model is on a web site or even on your local system, you
 
 #### **--help**, **-h**
 show this help message and exit
 
+#### **--network-mode**=*none*
+set the network mode for the container
+
 ## DESCRIPTION
 Benchmark specified AI Model.
diff --git a/docs/ramalama-convert.1.md b/docs/ramalama-convert.1.md
index 19fad843..9feb7857 100644
--- a/docs/ramalama-convert.1.md
+++ b/docs/ramalama-convert.1.md
@@ -25,6 +25,9 @@ type of OCI Model Image to convert.
 | car | Includes base image with the model stored in a /models subdir |
 | raw | Only the model and a link file model.file to it stored at / |
 
+#### **--network-mode**=*none*
+sets the configuration for network namespaces when handling RUN instructions
+
 ## EXAMPLE
 
 Generate an oci model out of an Ollama model.
diff --git a/docs/ramalama-run.1.md b/docs/ramalama-run.1.md
index e0a69bd6..b318e4a1 100644
--- a/docs/ramalama-run.1.md
+++ b/docs/ramalama-run.1.md
@@ -53,6 +53,9 @@ llama.cpp explains this as:
 #### **--tls-verify**=*true*
 require HTTPS and verify certificates when contacting OCI registries
 
+#### **--network-mode**=*none*
+set the network mode for the container
+
 ## DESCRIPTION
 Run specified AI Model as a chat bot. RamaLama pulls specified AI Model from
 registry if it does not exist in local storage. By default a prompt for a chat
diff --git a/docs/ramalama-serve.1.md b/docs/ramalama-serve.1.md
index 49840ef7..972e894b 100644
--- a/docs/ramalama-serve.1.md
+++ b/docs/ramalama-serve.1.md
@@ -64,6 +64,9 @@ IP address for llama.cpp to listen on.
 #### **--name**, **-n**
 Name of the container to run the Model in.
 
+#### **--network-mode**=*bridge*
+set the network mode for the container
+
 #### **--port**, **-p**
 port for AI Model server to listen on
 
diff --git a/ramalama/cli.py b/ramalama/cli.py
index 0b3577b4..828afbe2 100644
--- a/ramalama/cli.py
+++ b/ramalama/cli.py
@@ -379,6 +379,12 @@ def bench_cli(args):
 
 def bench_parser(subparsers):
     parser = subparsers.add_parser("bench", aliases=["benchmark"], help="benchmark specified AI Model")
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="none",
+        help="set the network mode for the container",
+    )
     parser.add_argument("MODEL")  # positional argument
     parser.set_defaults(func=bench_cli)
 
@@ -600,6 +606,13 @@ def convert_parser(subparsers):
 Model "car" includes base image with the model stored in a /models subdir.
 Model "raw" contains the model and a link file model.file to it stored at /.""",
     )
+    # https://docs.podman.io/en/latest/markdown/podman-build.1.html#network-mode-net
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="none",
+        help="sets the configuration for network namespaces when handling RUN instructions",
+    )
     parser.add_argument("SOURCE")  # positional argument
     parser.add_argument("TARGET")  # positional argument
     parser.set_defaults(func=convert_cli)
@@ -717,6 +730,15 @@ def _run(parser):
 def run_parser(subparsers):
     parser = subparsers.add_parser("run", help="run specified AI Model as a chatbot")
     _run(parser)
+    # Disable network access by default, and give the option to pass any supported network mode into
+    # podman if needed:
+    # https://docs.podman.io/en/latest/markdown/podman-run.1.html#network-mode-net
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="none",
+        help="set the network mode for the container",
+    )
     parser.add_argument("MODEL")  # positional argument
     parser.add_argument(
         "ARGS", nargs="*", help="Overrides the default prompt, and the output is returned without entering the chatbot"
@@ -742,6 +764,16 @@ def serve_parser(subparsers):
     parser.add_argument(
         "-p", "--port", default=config.get('port', "8080"), help="port for AI Model server to listen on"
     )
+    # --network-mode=bridge lets the container listen on localhost, and is an option that's compatible
+    # with podman and docker:
+    # https://docs.podman.io/en/latest/markdown/podman-run.1.html#network-mode-net
+    # https://docs.docker.com/engine/network/#drivers
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="bridge",
+        help="set the network mode for the container",
+    )
     parser.add_argument("MODEL")  # positional argument
     parser.set_defaults(func=serve_cli)
 
diff --git a/ramalama/model.py b/ramalama/model.py
index 710a27ac..6417090b 100644
--- a/ramalama/model.py
+++ b/ramalama/model.py
@@ -153,6 +153,7 @@ def setup_container(self, args):
         "-i",
         "--label",
         "RAMALAMA",
+        f"--network={args.network_mode}",
         "--security-opt=label=disable",
         "--name",
         name,
diff --git a/ramalama/oci.py b/ramalama/oci.py
index 6f5e50ff..0d705174 100644
--- a/ramalama/oci.py
+++ b/ramalama/oci.py
@@ -174,7 +174,7 @@ def build(self, source, target, args):
         else:
             c.write(model_raw)
         imageid = (
-            run_cmd([self.conman, "build", "--no-cache", "-q", "-f", containerfile.name, contextdir], debug=args.debug)
+            run_cmd([self.conman, "build", "--no-cache", f"--network={args.network_mode}", "-q", "-f", containerfile.name, contextdir], debug=args.debug)
             .stdout.decode("utf-8")
             .strip()
         )
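
For review, a minimal sketch of the mechanism this patch relies on: argparse maps `--network-mode` to the attribute `args.network_mode`, which `setup_container()` in ramalama/model.py and `build()` in ramalama/oci.py interpolate into the engine command line. The scaffolding below is illustrative only and not part of the change:

```python
import argparse

# Same flag definition as run_parser/bench_parser above; "none" is their default.
parser = argparse.ArgumentParser()
parser.add_argument("--network-mode", type=str, default="none",
                    help="set the network mode for the container")

# argparse turns the dashed flag name into the args.network_mode attribute.
args = parser.parse_args(["--network-mode", "bridge"])

# Mirrors the interpolation added in ramalama/model.py's setup_container().
conman_args = ["podman", "run", "-i", f"--network={args.network_mode}"]
print(conman_args)  # ['podman', 'run', '-i', '--network=bridge']
```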
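Assuming the patch lands as written, the defaults would behave as follows (`granite` is a placeholder model name):

```console
$ ramalama run granite              # run/bench/convert default to --network-mode=none
$ ramalama serve -p 8080 granite    # serve defaults to --network-mode=bridge, so the
                                    # published port stays reachable from the host
$ ramalama run --network-mode=host granite   # any mode the engine supports passes through
```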