From 1e71e7982d646b4e7de717ada3b4b227782d1dc4 Mon Sep 17 00:00:00 2001
From: Joshua Stone
Date: Fri, 31 Jan 2025 10:47:43 -0500
Subject: [PATCH] Fix up docs and add manpage entries

---
 docs/ramalama-bench.1.md   |  3 +++
 docs/ramalama-convert.1.md |  3 +++
 docs/ramalama-run.1.md     |  3 +++
 docs/ramalama-serve.1.md   |  3 +++
 ramalama/cli.py            | 12 ++++++++----
 5 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/docs/ramalama-bench.1.md b/docs/ramalama-bench.1.md
index 87d51acf..c94435f3 100644
--- a/docs/ramalama-bench.1.md
+++ b/docs/ramalama-bench.1.md
@@ -28,6 +28,9 @@ URL support means if a model is on a web site or even on your local system, you
 #### **--help**, **-h**
 show this help message and exit
 
+#### **--network-mode**=*none*
+set the network mode for the container
+
 ## DESCRIPTION
 Benchmark specified AI Model.
 
diff --git a/docs/ramalama-convert.1.md b/docs/ramalama-convert.1.md
index 19fad843..9feb7857 100644
--- a/docs/ramalama-convert.1.md
+++ b/docs/ramalama-convert.1.md
@@ -25,6 +25,9 @@ type of OCI Model Image to convert.
 | car | Includes base image with the model stored in a /models subdir |
 | raw | Only the model and a link file model.file to it stored at / |
 
+#### **--network-mode**=*none*
+sets the configuration for network namespaces when handling RUN instructions
+
 ## EXAMPLE
 
 Generate an oci model out of an Ollama model.
diff --git a/docs/ramalama-run.1.md b/docs/ramalama-run.1.md
index e0a69bd6..b318e4a1 100644
--- a/docs/ramalama-run.1.md
+++ b/docs/ramalama-run.1.md
@@ -53,6 +53,9 @@ llama.cpp explains this as:
 #### **--tls-verify**=*true*
 require HTTPS and verify certificates when contacting OCI registries
 
+#### **--network-mode**=*none*
+set the network mode for the container
+
 ## DESCRIPTION
 Run specified AI Model as a chat bot. RamaLama pulls specified AI Model from
 registry if it does not exist in local storage. By default a prompt for a chat
diff --git a/docs/ramalama-serve.1.md b/docs/ramalama-serve.1.md
index 49840ef4..972e894b 100644
--- a/docs/ramalama-serve.1.md
+++ b/docs/ramalama-serve.1.md
@@ -64,6 +64,9 @@ IP address for llama.cpp to listen on.
 #### **--name**, **-n**
 Name of the container to run the Model in.
 
+#### **--network-mode**=*bridge*
+set the network mode for the container
+
 #### **--port**, **-p**
 port for AI Model server to listen on
 
diff --git a/ramalama/cli.py b/ramalama/cli.py
index 2d029841..828afbe2 100644
--- a/ramalama/cli.py
+++ b/ramalama/cli.py
@@ -383,7 +383,7 @@ def bench_parser(subparsers):
         "--network-mode",
         type=str,
         default="none",
-        help="Set the network mode for the container.",
+        help="set the network mode for the container",
     )
     parser.add_argument("MODEL")  # positional argument
     parser.set_defaults(func=bench_cli)
@@ -611,7 +611,7 @@ def convert_parser(subparsers):
         "--network-mode",
         type=str,
         default="none",
-        help="Sets the configuration for network namespaces when handling RUN instructions.",
+        help="sets the configuration for network namespaces when handling RUN instructions",
     )
     parser.add_argument("SOURCE")  # positional argument
     parser.add_argument("TARGET")  # positional argument
@@ -737,7 +737,7 @@ def run_parser(subparsers):
         "--network-mode",
         type=str,
         default="none",
-        help="Set the network mode for the container.",
+        help="set the network mode for the container",
     )
     parser.add_argument("MODEL")  # positional argument
     parser.add_argument(
@@ -764,11 +764,15 @@ def serve_parser(subparsers):
     parser.add_argument(
         "-p", "--port", default=config.get('port', "8080"), help="port for AI Model server to listen on"
     )
+    # --network-mode=bridge lets the container listen on localhost, and is an option that's compatible
+    # with podman and docker:
+    # https://docs.podman.io/en/latest/markdown/podman-run.1.html#network-mode-net
+    # https://docs.docker.com/engine/network/#drivers
     parser.add_argument(
         "--network-mode",
         type=str,
         default="bridge",
-        help="Set the network mode for the container.",
+        help="set the network mode for the container",
     )
     parser.add_argument("MODEL")  # positional argument
     parser.set_defaults(func=serve_cli)