common: general improvements #695

Closed · wants to merge 7 commits
2 changes: 2 additions & 0 deletions ramalama/cli.py
@@ -663,6 +663,8 @@ def _get_source(args):
     if smodel.type == "OCI":
         return src
     else:
+        if not smodel.exists(args):
+            return smodel.pull(args)
         return smodel.path(args)
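The two added lines make _get_source pull a missing non-OCI model on demand instead of handing back a path that may not exist. A minimal runnable sketch of the pattern, using a hypothetical stand-in for the model object (the real smodel classes live elsewhere in ramalama):

class FakeModel:
    """Hypothetical stand-in for the model objects _get_source works with."""

    def exists(self, args):
        return False  # pretend the model is not in the local store

    def pull(self, args):
        print("pulling model...")
        return "/var/lib/ramalama/models/tiny.gguf"  # illustrative path

    def path(self, args):
        return "/var/lib/ramalama/models/tiny.gguf"


smodel = FakeModel()
# Mirrors the new else branch: pull on demand, then return a usable path
src = smodel.pull(None) if not smodel.exists(None) else smodel.path(None)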


74 changes: 60 additions & 14 deletions ramalama/common.py
@@ -1,19 +1,29 @@
"""ramalama common module."""
dougsland marked this conversation as resolved.
Show resolved Hide resolved

import glob
import hashlib
import os
import random
import logging
import shutil
import string
import subprocess
import time
import sys
import urllib.request
import urllib.error
import ramalama.console as console

from ramalama.http_client import HttpClient


logging.basicConfig(
level=logging.WARNING,
format="%(asctime)s - %(levelname)s - %(message)s"
)

MNT_DIR = "/mnt/models"
MNT_FILE = f"{MNT_DIR}/model.file"
HTTP_RANGE_NOT_SATISFIABLE = 416


def container_manager():
@@ -165,25 +175,61 @@ def download_file(url, dest_path, headers=None, show_progress=True):
         headers (dict): Optional headers to include in the request.
         show_progress (bool): Whether to show a progress bar during download.
 
-    Returns:
-        None
+    Raises:
+        urllib.error.HTTPError: For HTTP errors other than "Range Not Satisfiable".
+
+    Exits the process with status 1 if the download fails after all retries.
     """
-    http_client = HttpClient()
-
     headers = headers or {}
 
-    # if we are not a tty, don't show progress, can pollute CI output and such
+    # If not running in a TTY, disable progress to prevent CI pollution
     if not sys.stdout.isatty():
         show_progress = False
 
-    try:
-        http_client.init(url=url, headers=headers, output_file=dest_path, progress=show_progress)
-    except urllib.error.HTTPError as e:
-        if e.code == 416:  # Range not satisfiable
-            if show_progress:
-                print(f"File {url} already fully downloaded.")
-        else:
-            raise e
+    http_client = HttpClient()
+    max_retries = 5  # Stop after 5 failures
+    retries = 0
+
+    while retries < max_retries:
+        try:
+            # Initialize HTTP client for the request
+            http_client.init(url=url, headers=headers, output_file=dest_path, progress=show_progress)
+            return  # Exit function if successful
+
+        except urllib.error.HTTPError as e:
+            if e.code == HTTP_RANGE_NOT_SATISFIABLE:  # File already fully downloaded
+                return  # No need to retry
+            raise e  # Other HTTP errors are not retryable here
+
+        except urllib.error.URLError as e:
+            console.error(f"Network Error: {e.reason}")
+            retries += 1
+
+        except TimeoutError:
+            retries += 1
+            console.warning(f"TimeoutError: The server took too long to respond. Retrying {retries}/{max_retries}...")
+
+        except RuntimeError as e:  # Catch network-related errors from HttpClient
+            retries += 1
+            console.warning(f"{e}. Retrying {retries}/{max_retries}...")
+
+        except IOError as e:
+            retries += 1
+            console.warning(f"I/O Error: {e}. Retrying {retries}/{max_retries}...")
+
+        except Exception as e:
+            console.error(f"Unexpected error: {str(e)}")
+            raise
+
+        if retries >= max_retries:
+            error_message = (
+                "\nDownload failed after multiple attempts.\n"
+                "Possible causes:\n"
+                "- Internet connection issue\n"
+                "- Server is down or unresponsive\n"
+                "- Firewall or proxy blocking the request\n"
+            )
+            console.error(error_message)
+            sys.exit(1)
+
+        time.sleep(2 ** retries * 0.1)  # Exponential backoff (0.2s, 0.4s, 0.8s, ...)
 
 
 def engine_version(engine):
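For illustration, a usage sketch of the retried download; the URL, destination, and header below are assumptions, not ramalama defaults. A resumed file that is already complete draws an HTTP 416 and the call returns silently; after five consecutive network failures it prints a diagnostic and exits the process instead of raising. Between attempts it sleeps 2 ** retries * 0.1 seconds, i.e. 0.2s, 0.4s, 0.8s, 1.6s.

from ramalama.common import download_file

download_file(
    url="https://example.com/models/tiny.gguf",      # illustrative URL
    dest_path="/tmp/tiny.gguf",
    headers={"Accept": "application/octet-stream"},  # assumed header
    show_progress=True,  # auto-disabled when stdout is not a TTY
)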
35 changes: 35 additions & 0 deletions ramalama/console.py
@@ -0,0 +1,35 @@
import os
import sys
import locale
import logging

def is_locale_utf8():
"""Check if the system locale is UTF-8."""
return 'UTF-8' in os.getenv('LC_CTYPE', '') or 'UTF-8' in os.getenv('LANG', '')
Collaborator: This code looks great. LC_ALL is the other one; you could add it if you wanted.
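A minimal sketch of that suggestion (not part of this PR), folding LC_ALL into the same check:

import os

def is_locale_utf8():
    """Check LC_ALL as well as LC_CTYPE and LANG for a UTF-8 locale."""
    return any("UTF-8" in os.getenv(var, "") for var in ("LC_ALL", "LC_CTYPE", "LANG"))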


def supports_emoji():
"""Detect if the terminal supports emoji output."""
term = os.getenv("TERM")
if not term or term in ("dumb", "linux"):
return False

return is_locale_utf8()

# Allow users to override emoji support via an environment variable
# If RAMALAMA_FORCE_EMOJI is not set, it defaults to checking supports_emoji()
RAMALAMA_FORCE_EMOJI = os.getenv("RAMALAMA_FORCE_EMOJI")
FORCE_EMOJI = RAMALAMA_FORCE_EMOJI.lower() == "true" if RAMALAMA_FORCE_EMOJI else None
EMOJI = FORCE_EMOJI if FORCE_EMOJI is not None else supports_emoji()

# Define emoji-aware logging messages
def error(msg):
formatted_msg = f"❌ {msg}" if EMOJI else f"[ERROR] {msg}"
logging.error(formatted_msg)

def warning(msg):
formatted_msg = f"⚠️ {msg}" if EMOJI else f"[WARNING] {msg}"
logging.warning(formatted_msg)

def info(msg):
formatted_msg = f"ℹ️ {msg}" if EMOJI else f"[INFO] {msg}"
logging.info(formatted_msg)
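Because EMOJI is computed once at import time, a RAMALAMA_FORCE_EMOJI override must be in the environment before ramalama.console is first imported; note also that info() logs below the WARNING level configured in common.py, so it is silent by default. A hypothetical session:

import os

os.environ["RAMALAMA_FORCE_EMOJI"] = "false"  # opt out of emoji before the import below

import ramalama.console as console

console.warning("The server took too long to respond. Retrying 1/5...")  # [WARNING] ...
console.error("Download failed after multiple attempts.")                # [ERROR] ...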
2 changes: 2 additions & 0 deletions ramalama/ollama.py
@@ -107,6 +107,8 @@ def pull(self, args):
         try:
             return init_pull(repos, accept, registry_head, model_name, model_tag, models, model_path, self.model)
         except urllib.error.HTTPError as e:
+            if "Not Found" in e.reason:
+                raise KeyError(f"{self.model} was not found in the Ollama registry")
             raise KeyError(f"failed to pull {registry_head}: " + str(e).strip("'"))
 
     def model_path(self, args):
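Illustrative only: how the friendlier message reaches callers, and why the CLI strips quotes from the KeyError (the helper below is a placeholder that fails the way pull() now does):

def pull_from_ollama(model_name):
    """Placeholder raising the same error ollama.py's pull() now raises."""
    raise KeyError(f"{model_name} was not found in the Ollama registry")

try:
    pull_from_ollama("bogus")
except KeyError as e:
    # str() of a KeyError keeps its quotes, hence the strip("'") seen above
    print("Error: " + str(e).strip("'"))
# prints: Error: bogus was not found in the Ollama registry

The updated bats expectations below assert against exactly this message.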
2 changes: 1 addition & 1 deletion test/system/040-serve.bats
@@ -45,7 +45,7 @@ verify_begin=".*run --rm -i --label RAMALAMA --security-opt=label=disable --name
     fi
 
     run_ramalama 1 serve MODEL
-    is "$output" ".*Error: failed to pull .*MODEL" "failed to pull model"
+    is "$output" "Error: MODEL was not found in the Ollama registry"
 }
 
 @test "ramalama --detach serve" {
2 changes: 1 addition & 1 deletion test/system/050-pull.bats
@@ -26,7 +26,7 @@ load setup_suite

     random_image_name=i_$(safename)
     run_ramalama 1 pull ${random_image_name}
-    is "$output" "Error: failed to pull https://registry.ollama.ai/v2/library/${random_image_name}: HTTP Error 404: Not Found" "image does not exist"
+    is "$output" "Error: ${random_image_name} was not found in the Ollama registry"
 }
 
 # bats test_tags=distro-integration
2 changes: 1 addition & 1 deletion test/system/055-convert.bats
@@ -10,7 +10,7 @@ load helpers
     run_ramalama 2 convert tiny
     is "$output" ".*ramalama convert: error: the following arguments are required: TARGET"
     run_ramalama 1 convert bogus foobar
-    is "$output" "Error: bogus does not exist"
+    is "$output" "Error: bogus was not found in the Ollama registry"
 }
 
 @test "ramalama convert file to image" {