diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 8dc9347..1ff8581 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -24,11 +24,14 @@ jobs: os: - Ubuntu py: -# - "3.12" + - "3.12" - "3.11" - "3.10" - "3.9" -# - "3.8" + kubernetes: + - "1.31.0" + - "1.30.0" + - "1.29.0" steps: - name: Setup python for test ${{ matrix.py }} uses: actions/setup-python@v4 @@ -54,7 +57,8 @@ jobs: run: poetry install --all-extras --sync - name: Run tests env: - PYTEST_KUBERAY_VERSIONS: "1.0.0,1.1.0" # will run tests for all these KubeRay versions + PYTEST_KUBERAY_VERSIONS: "1.0.0,1.1.0,1.2.0" # will run tests for all these KubeRay versions + PYTEST_KUBERNETES_VERSION: ${{ matrix.kubernetes }} run: pytest -v . lint: diff --git a/README.md b/README.md index a56aa91..7cb6e58 100644 --- a/README.md +++ b/README.md @@ -115,10 +115,70 @@ definitions = Definitions( This backend requires a Kubernetes cluster with the `KubeRay Operator` installed. -Integrates with [Dagster+](https://dagster.io/plus) by injecting environment variables such as `DAGSTER_CLOUD_DEPLOYMENT_NAME` and tags such as `dagster/user` into default configuration values and `RayCluster` labels. +Integrates with [Dagster+](https://dagster.io/plus) by injecting environment variables such as `DAGSTER_CLOUD_DEPLOYMENT_NAME` and tags such as `dagster/user` into default configuration values and Kubernetes labels. + +To run `ray` code in client mode (from the Dagster Python process directly), use the `KubeRayCluster` resource (see the [KubeRayCluster](#KubeRayCluster) section). +To run `ray` code in job mode, use the `PipesRayJobClient` with Dagster Pipes (see the [Pipes](#pipes) section). The public objects can be imported from the `dagster_ray.kuberay` module. +### Pipes + +`dagster-ray` provides the `PipesRayJobClient`, which can be used to execute remote Ray jobs on Kubernetes and receive Dagster events and logs from them. +[RayJob](https://docs.ray.io/en/latest/cluster/kubernetes/getting-started/rayjob-quick-start.html) will manage the lifecycle of the underlying `RayCluster`, which will be cleaned up after the specified entrypoint exits. + +Examples: + +On the orchestration side, import the `PipesRayJobClient` and invoke it inside an `@op` or an `@asset`: + +```python +from dagster import asset, Definitions + +from dagster_ray.kuberay import PipesRayJobClient + + +@asset +def my_asset(pipes_ray_job_client: PipesRayJobClient): + pipes_ray_job_client.run( + ray_job={ + # RayJob manifest goes here; .metadata.name is optional and will be generated if not provided + # full reference: https://ray-project.github.io/kuberay/reference/api/#rayjob + ... 
+ }, + extra={"foo": "bar"}, + ) + + +definitions = Definitions( + resources={"pipes_ray_job_client": PipesRayJobClient()}, assets=[my_asset] +) +``` + +In the Ray job, import `dagster_pipes` (must be provided as a dependency) and emit regular Dagster events such as logs or asset materializations: + +```python +from dagster_pipes import open_dagster_pipes + + +with open_dagster_pipes() as pipes: + pipes.log("Hello from Ray!") + pipes.report_asset_materialization( + metadata={"some_metric": {"raw_value": 0, "type": "int"}}, + data_version="alpha", + ) +``` + +A convenient way to provide `dagster-pipes` to the Ray job is with `runtimeEnvYaml` field: + +```python +import yaml + +ray_job = {"spec": {"runtimeEnvYaml": yaml.safe_dump({"pip": ["dagster-pipes"]})}} +``` + +The logs and events emitted by the Ray job will be captured by the `PipesRayJobClient` and will become available in the Dagster event log. +Normal stdout & stderr will be forwarded to stdout. + ### Resources #### `KubeRayCluster` @@ -175,7 +235,7 @@ ray_cluster = KubeRayCluster( ) ) ``` -#### `KubeRayAPI` +#### `KubeRayClient` This resource can be used to interact with the Kubernetes API Server. @@ -185,14 +245,14 @@ Listing currently running `RayClusters`: ```python from dagster import op, Definitions -from dagster_ray.kuberay import KubeRayAPI +from dagster_ray.kuberay import KubeRayClient @op def list_ray_clusters( - kube_ray_api: KubeRayAPI, + kube_ray_client: KubeRayClient, ): - return kube_ray_api.kuberay.list_ray_clusters(k8s_namespace="kuberay") + return kube_ray_client.client.list(namespace="kuberay") ``` ### Jobs diff --git a/dagster_ray/_base/constants.py b/dagster_ray/_base/constants.py new file mode 100644 index 0000000..fc87b64 --- /dev/null +++ b/dagster_ray/_base/constants.py @@ -0,0 +1,9 @@ +import os + +DEFAULT_DEPLOYMENT_NAME = ( + os.getenv("DAGSTER_CLOUD_DEPLOYMENT_NAME") + if os.getenv("DAGSTER_CLOUD_IS_BRANCH_DEPLOYMENT") == "0" + else os.getenv("DAGSTER_CLOUD_GIT_BRANCH") +) or "dev" + +IS_PROD = DEFAULT_DEPLOYMENT_NAME == "prod" diff --git a/dagster_ray/_base/resources.py b/dagster_ray/_base/resources.py index 55f228a..7b8ce17 100644 --- a/dagster_ray/_base/resources.py +++ b/dagster_ray/_base/resources.py @@ -1,7 +1,6 @@ -import sys import uuid from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Optional, Union, cast +from typing import TYPE_CHECKING, Dict, Optional, Union, cast from dagster import ConfigurableResource, InitResourceContext, OpExecutionContext from pydantic import Field, PrivateAttr @@ -11,14 +10,9 @@ from requests.exceptions import ConnectionError from tenacity import retry, retry_if_exception_type, stop_after_delay +from dagster_ray._base.utils import get_dagster_tags from dagster_ray.config import RayDataExecutionOptions -if sys.version_info >= (3, 11): - pass -else: - pass - - if TYPE_CHECKING: from ray._private.worker import BaseContext as RayBaseContext # noqa @@ -86,6 +80,10 @@ def init_ray(self, context: Union[OpExecutionContext, InitResourceContext]) -> " context.log.info("Initialized Ray!") return cast("RayBaseContext", self._context) + def get_dagster_tags(self, context: InitResourceContext) -> Dict[str, str]: + tags = get_dagster_tags(context) + return tags + def _get_step_key(self, context: InitResourceContext) -> str: # just return a random string # since we want a fresh cluster every time diff --git a/dagster_ray/_base/utils.py b/dagster_ray/_base/utils.py new file mode 100644 index 0000000..1b6ff12 --- /dev/null +++ b/dagster_ray/_base/utils.py @@ -0,0 
+1,30 @@ +import os +from typing import Dict, Union, cast + +from dagster import InitResourceContext, OpExecutionContext + +from dagster_ray._base.constants import DEFAULT_DEPLOYMENT_NAME + + +def get_dagster_tags(context: Union[InitResourceContext, OpExecutionContext]) -> Dict[str, str]: + """ + Returns a dictionary with common Dagster tags. + """ + assert context.dagster_run is not None + + labels = { + "dagster.io/run_id": cast(str, context.run_id), + "dagster.io/deployment": DEFAULT_DEPLOYMENT_NAME, + # TODO: add more labels + } + + if context.dagster_run.tags.get("user"): + labels["dagster.io/user"] = context.dagster_run.tags["user"] + + if os.getenv("DAGSTER_CLOUD_GIT_BRANCH"): + labels["dagster.io/git-branch"] = os.environ["DAGSTER_CLOUD_GIT_BRANCH"] + + if os.getenv("DAGSTER_CLOUD_GIT_SHA"): + labels["dagster.io/git-sha"] = os.environ["DAGSTER_CLOUD_GIT_SHA"] + + return labels diff --git a/dagster_ray/config.py b/dagster_ray/config.py index 5adb844..37090f9 100644 --- a/dagster_ray/config.py +++ b/dagster_ray/config.py @@ -2,10 +2,8 @@ from typing import Optional -import ray from dagster import Config from pydantic import Field -from ray.data import ExecutionResources class ExecutionOptionsConfig(Config): @@ -23,6 +21,9 @@ class RayDataExecutionOptions(Config): use_polars: bool = True def apply(self): + import ray + from ray.data import ExecutionResources + ctx = ray.data.DatasetContext.get_current() ctx.execution_options.resource_limits = ExecutionResources.for_limits( @@ -35,6 +36,8 @@ def apply(self): ctx.use_polars = self.use_polars def apply_remote(self): + import ray + @ray.remote def apply(): self.apply() diff --git a/dagster_ray/kuberay/__init__.py b/dagster_ray/kuberay/__init__.py index 8580d21..d312556 100644 --- a/dagster_ray/kuberay/__init__.py +++ b/dagster_ray/kuberay/__init__.py @@ -1,13 +1,13 @@ from dagster_ray.kuberay.configs import RayClusterConfig from dagster_ray.kuberay.jobs import cleanup_kuberay_clusters, delete_kuberay_clusters from dagster_ray.kuberay.ops import cleanup_kuberay_clusters_op, delete_kuberay_clusters_op -from dagster_ray.kuberay.resources import KubeRayAPI, KubeRayCluster +from dagster_ray.kuberay.resources import KubeRayCluster, RayClusterClientResource from dagster_ray.kuberay.schedules import cleanup_kuberay_clusters_daily __all__ = [ "KubeRayCluster", "RayClusterConfig", - "KubeRayAPI", + "RayClusterClientResource", "cleanup_kuberay_clusters", "delete_kuberay_clusters", "cleanup_kuberay_clusters_op", diff --git a/dagster_ray/kuberay/client/__init__.py b/dagster_ray/kuberay/client/__init__.py new file mode 100644 index 0000000..b8e5bb5 --- /dev/null +++ b/dagster_ray/kuberay/client/__init__.py @@ -0,0 +1,4 @@ +from dagster_ray.kuberay.client.raycluster import RayClusterClient +from dagster_ray.kuberay.client.rayjob import RayJobClient + +__all__ = ["RayClusterClient", "RayJobClient"] diff --git a/dagster_ray/kuberay/client/base.py b/dagster_ray/kuberay/client/base.py new file mode 100644 index 0000000..c24019b --- /dev/null +++ b/dagster_ray/kuberay/client/base.py @@ -0,0 +1,151 @@ +import time +from typing import TYPE_CHECKING, Any, Dict, Optional + +from kubernetes import client +from kubernetes.client import ApiException + +if TYPE_CHECKING: + from kubernetes.client.models.v1_endpoints import V1Endpoints + + +def load_kubeconfig(context: Optional[str] = None, config_file: Optional[str] = None) -> Any: + from kubernetes import config + + try: + return config.load_kube_config(context=context, config_file=config_file) + except 
config.config_exception.ConfigException: + try: + return config.load_incluster_config() + except config.config_exception.ConfigException: + pass + + +class BaseKubeRayClient: + def __init__( + self, + group: str, + version: str, + kind: str, + plural: str, + context: Optional[str] = None, + config_file: Optional[str] = None, + ): + self.group = group + self.version = version + self.kind = kind + self.plural = plural + self.config_file = config_file + self.context = context + + self.kube_config: Optional[Any] = load_kubeconfig(config_file=config_file, context=context) + + self._api = client.CustomObjectsApi() + self._core_v1_api = client.CoreV1Api() + + def wait_for_service_endpoints(self, service_name: str, namespace: str, poll_interval: int = 5, timeout: int = 60): + start_time = time.time() + + while True: + try: + # Get the endpoints for the service + endpoints: V1Endpoints = self._core_v1_api.read_namespaced_endpoints(service_name, namespace) # type: ignore + + # Check if there are addresses in any of the subsets + if endpoints.subsets: + for subset in endpoints.subsets: + if subset.addresses: + return + except ApiException: + pass + + elapsed_time = time.time() - start_time + if elapsed_time > timeout: + raise TimeoutError( + f"Timed out waiting for endpoints for service {service_name} in namespace {namespace}" + ) + + time.sleep(poll_interval) + + def get_status(self, name: str, namespace: str, timeout: int = 60, poll_interval: int = 5) -> Dict[str, Any]: + while timeout > 0: + try: + resource: Any = self._api.get_namespaced_custom_object_status( + group=self.group, + version=self.version, + plural=self.plural, + name=name, + namespace=namespace, + ) + except ApiException: + raise + + if resource.get("status"): + return resource["status"] + else: + time.sleep(poll_interval) + timeout -= poll_interval + + raise TimeoutError(f"Timed out waiting for status of {self.kind} {name} in namespace {namespace}") + + def list(self, namespace: str, label_selector: str = "", async_req: bool = False) -> Dict[str, Any]: + try: + resource: Any = self._api.list_namespaced_custom_object( + group=self.group, + version=self.version, + plural=self.plural, + namespace=namespace, + label_selector=label_selector, + async_req=async_req, + ) + if "items" in resource: + return resource + else: + return {} + except ApiException as e: + if e.status == 404: + return {} + + raise + + def get(self, name: str, namespace: str) -> Dict[str, Any]: + try: + resource: Any = self._api.get_namespaced_custom_object( + group=self.group, + version=self.version, + plural=self.plural, + name=name, + namespace=namespace, + ) + return resource + except ApiException as e: + if e.status == 404: + return {} + raise + + def create(self, body: Any, namespace: str) -> Any: + return self._api.create_namespaced_custom_object( + group=self.group, + version=self.version, + plural=self.plural, + body=body, + namespace=namespace, + ) + + def delete(self, name: str, namespace: str): + return self._api.delete_namespaced_custom_object( + group=self.group, + version=self.version, + plural=self.plural, + name=name, + namespace=namespace, + ) + + def uodate(self, name: str, ray_patch: Any, namespace: str): + return self._api.patch_namespaced_custom_object( + group=self.group, + version=self.version, + plural=self.plural, + name=name, + body=ray_patch, + namespace=namespace, + ) diff --git a/dagster_ray/kuberay/client/raycluster/__init__.py b/dagster_ray/kuberay/client/raycluster/__init__.py new file mode 100644 index 0000000..62ec717 --- /dev/null 
+++ b/dagster_ray/kuberay/client/raycluster/__init__.py @@ -0,0 +1,3 @@ +from dagster_ray.kuberay.client.raycluster.client import RayClusterClient, RayClusterStatus + +__all__ = ["RayClusterClient", "RayClusterStatus"] diff --git a/dagster_ray/kuberay/client/raycluster/client.py b/dagster_ray/kuberay/client/raycluster/client.py new file mode 100644 index 0000000..c15f76e --- /dev/null +++ b/dagster_ray/kuberay/client/raycluster/client.py @@ -0,0 +1,267 @@ +import logging +import socket +import subprocess +import threading +import time +from contextlib import contextmanager +from io import FileIO +from queue import Queue +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterator, + Optional, + Tuple, + TypedDict, + cast, +) + +from kubernetes import watch +from typing_extensions import NotRequired + +from dagster_ray.kuberay.client.base import BaseKubeRayClient + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from ray.job_submission import JobSubmissionClient + + +GROUP = "ray.io" +VERSION = "v1" +PLURAL = "rayclusters" +KIND = "RayCluster" + + +def enqueue_output(out: FileIO, queue: Queue, should_stop): + for line in iter(out.readline, b""): + if should_stop(): + return + queue.put(line) + out.close() + + +def get_random_available_port() -> int: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("", 0)) # Bind to an available port chosen by the OS + _, port = s.getsockname() # Get the port number assigned + return port + + +class RayClusterEndpoints(TypedDict): # these are ports + client: str + dashboard: str + metrics: str + redis: str + serve: str + + +class RayClusterHead(TypedDict): + podIP: str + serviceIP: str + + +class RayClusterStatus(TypedDict): + desiredCPU: str + desiredGPU: str + desiredMemory: str + desiredTPU: str + + lastUpdateTime: str # "2024-09-16T09:34:29Z" + maxWorkerReplicas: int + observedGeneration: int + + head: NotRequired[RayClusterHead] + endpoints: NotRequired[RayClusterEndpoints] + state: NotRequired[str] + + +class RayClusterClient(BaseKubeRayClient): + def __init__(self, config_file: Optional[str] = None, context: Optional[str] = None) -> None: + super().__init__( + group=GROUP, + version=VERSION, + kind=KIND, + plural=PLURAL, + config_file=config_file, + context=context, + ) + + def get_status(self, name: str, namespace: str, timeout: int = 60, poll_interval: int = 5) -> RayClusterStatus: # type: ignore + return cast( + RayClusterStatus, + super().get_status(name=name, namespace=namespace, timeout=timeout, poll_interval=poll_interval), + ) + + def wait_until_ready( + self, + name: str, + namespace: str, + timeout: int, + image: Optional[str] = None, + ) -> Tuple[str, Dict[str, str]]: + """ + If ready, returns service ip address and a dictionary of ports. + Dictionary keys: ["client", "dashboard", "metrics", "redis", "serve"] + """ + + w = watch.Watch() + + start_time = time.time() + + # TODO: use get_namespaced_custom_object instead + # once https://github.com/kubernetes-client/python/issues/1679 + # is solved + for event in w.stream( + self._api.list_namespaced_custom_object, + self.group, + self.version, + namespace, + self.plural, + ): + item = cast(Dict[str, Any], event["raw_object"]) # type: ignore + + if "status" not in item: + continue + + status: RayClusterStatus = item["status"] + + if status.get("state") == "failed": + raise Exception( + f"RayCluster {namespace}/{name} failed to start. 
More details: `kubectl -n {namespace} describe RayCluster {name}`" + ) + + if ( + item.get("metadata") # type: ignore + and item["metadata"].get("name") == name # type: ignore + and status.get("state") == "ready" + and status.get("head") + ): + if image is not None: + if ( + item.get("spec") # type: ignore + and item["spec"]["headGroupSpec"]["template"]["spec"]["containers"][0]["image"] # type: ignore + != image + ): + continue + w.stop() + logger.debug(f"RayCluster {namespace}/{name} is ready!") + return status["head"]["serviceIP"], status["endpoints"] # type: ignore + + if time.time() - start_time > timeout: + w.stop() + raise TimeoutError( + f"Timed out waiting for RayCluster {namespace}/{name} to be ready. " f"Status: {status}" + ) + + raise Exception("This code should be unreachable") + + @contextmanager + def port_forward( + self, + name: str, + namespace: str, + local_dashboard_port: int = 8265, + local_gcs_port: int = 10001, + ) -> Iterator[Tuple[int, int]]: + """ + Port forwards the Ray dashboard and GCS ports to localhost. + Use 0 for local_dashboard_port and local_gcs_port to get random available ports. + Returns the ports that the dashboard and GCS are forwarded to. + """ + if local_dashboard_port == 0: + local_dashboard_port = get_random_available_port() + + if local_gcs_port == 0: + local_gcs_port = get_random_available_port() + + service = f"{name}-head-svc" + + if not self.get_status(name, namespace): + raise RuntimeError(f"RayCluster {name} does not exist in namespace {namespace}") + + self.wait_for_service_endpoints(service_name=service, namespace=namespace) + + cmd = [ + "kubectl", + "port-forward", + "-n", + namespace, + f"svc/{service}", + f"{local_dashboard_port}:8265", + f"{local_gcs_port}:10001", + ] + + if self.context: + cmd.extend(["--context", self.context]) + + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + + queue = Queue() + SHOULD_STOP = False + + def should_stop(): + return SHOULD_STOP + + t = threading.Thread(target=enqueue_output, args=(process.stdout, queue, should_stop)) + t.daemon = True # thread dies with the program + t.start() + + time.sleep(0.01) + + try: + # inspect stdout to avoid connecting too early + # we must check for "Forwarding from" in stdout + # + + while True: + if process.poll() is not None: + raise RuntimeError( + f"port-forwarding command: `{' '.join(cmd)}` failed. Most likely ports {local_dashboard_port} or {local_gcs_port} are already in use, or Kubernetes service {name}-head-svc does not exist in namespace {namespace}." + ) + + line = queue.get() + + if "Forwarding from" in line: + break + + logger.debug(f"port-forwarding for ports {local_dashboard_port} and {local_gcs_port} started") + + yield local_dashboard_port, local_gcs_port + finally: + # terminate the thread + SHOULD_STOP = True + process.kill() + process.wait() + t.join() + logger.debug(f"port-forwarding for ports {local_dashboard_port} and {local_gcs_port} stopped") + + @contextmanager + def job_submission_client( + self, name: str, namespace: str, port_forward: bool = False + ) -> Iterator["JobSubmissionClient"]: + """ + Returns a JobSubmissionClient object that can be used to interact with Ray jobs running in the KubeRay cluster. + If port_forward is True, it will port forward the dashboard and GCS ports to localhost, and should be used in a context manager. + If port_forward is False, the client will connect to the dashboard directly (assuming the dashboard is accessible from the host). 
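For illustration, a minimal sketch of how this context manager might be used from a local machine; the cluster name and namespace below are hypothetical, and `list_jobs()` comes from Ray's `JobSubmissionClient`:

```python
from dagster_ray.kuberay.client import RayClusterClient

client = RayClusterClient()  # picks up the active kubeconfig context

# port_forward=True shells out to `kubectl port-forward` on random free local
# ports, so the Ray dashboard does not have to be reachable from this host.
with client.job_submission_client(
    name="my-raycluster",  # hypothetical RayCluster name
    namespace="ray",
    port_forward=True,
) as job_client:
    print(job_client.list_jobs())
```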
+ """ + + from ray.job_submission import JobSubmissionClient + + if not port_forward: + # TODO: revisit the decision to use this as context-manager in this case + # + status = self.get_status(name, namespace) + + host = status["head"]["serviceIP"] # type: ignore + dashboard_port = status["endpoints"]["dashboard"] # type: ignore + + yield JobSubmissionClient(address=f"http://{host}:{dashboard_port}") + else: + self.wait_for_service_endpoints(service_name=f"{name}-head-svc", namespace=namespace) + with self.port_forward(name=name, namespace=namespace, local_dashboard_port=0, local_gcs_port=0) as ( + local_dashboard_port, + _, + ): + yield JobSubmissionClient(address=f"http://localhost:{local_dashboard_port}") diff --git a/dagster_ray/kuberay/client/rayjob/__init__.py b/dagster_ray/kuberay/client/rayjob/__init__.py new file mode 100644 index 0000000..5de9751 --- /dev/null +++ b/dagster_ray/kuberay/client/rayjob/__init__.py @@ -0,0 +1,3 @@ +from dagster_ray.kuberay.client.rayjob.client import RayJobClient + +__all__ = ["RayJobClient"] diff --git a/dagster_ray/kuberay/client/rayjob/client.py b/dagster_ray/kuberay/client/rayjob/client.py new file mode 100644 index 0000000..9edb72a --- /dev/null +++ b/dagster_ray/kuberay/client/rayjob/client.py @@ -0,0 +1,179 @@ +import logging +import time +from typing import Iterator, Literal, NotRequired, Optional, TypedDict, cast + +from dagster_ray.kuberay.client.base import BaseKubeRayClient +from dagster_ray.kuberay.client.raycluster import RayClusterClient, RayClusterStatus + +GROUP = "ray.io" +VERSION = "v1" +PLURAL = "rayjobs" +KIND = "RayJob" + +logger = logging.getLogger(__name__) + + +# only these fields are set: +# jobDeploymentStatus: Initializing +# jobId: dan-continuous-sunspot-cxx59 +# rayClusterName: dan-continuous-sunspot-raycluster-5knd4 +# rayClusterStatus +# startTime + + +class RayJobStatus(TypedDict): + jobId: str + jobDeploymentStatus: str + rayClusterName: str + rayClusterStatus: RayClusterStatus + startTime: str + + dashboardURL: NotRequired[str] + jobDeploymentStatus: NotRequired[str] + endTime: NotRequired[str] + jobStatus: NotRequired[Literal["PENDING", "RUNNING", "SUCCEEDED", "FAILED", "STOPPED"]] + message: NotRequired[str] + + +class RayJobClient(BaseKubeRayClient): + def __init__(self, config_file: Optional[str] = None, context: Optional[str] = None) -> None: + super().__init__( + group=GROUP, + version=VERSION, + kind=KIND, + plural=PLURAL, + config_file=config_file, + context=context, + ) + + def get_status(self, name: str, namespace: str, timeout: int = 60, poll_interval: int = 5) -> RayJobStatus: # type: ignore + return cast( + RayJobStatus, + super().get_status(name=name, namespace=namespace, timeout=timeout, poll_interval=poll_interval), + ) + + def get_ray_cluster_name(self, name: str, namespace: str) -> str: + return self.get_status(name, namespace)["rayClusterName"] + + def get_job_sumission_id(self, name: str, namespace: str) -> str: + return self.get_status(name, namespace)["jobId"] + + @property + def ray_cluster_client(self) -> RayClusterClient: + return RayClusterClient(config_file=self.kube_config, context=self.context) + + def wait_until_running( + self, + name: str, + namespace: str, + timeout: int = 60 * 60, + poll_interval: int = 5, + ) -> bool: + start_time = time.time() + + while True: + status = self.get_status(name, namespace, timeout, poll_interval).get("jobDeploymentStatus") + + if status in ["Running", "Complete"]: + break + elif status == "Failed": + return False + + if time.time() - start_time > 
timeout: + raise TimeoutError( + f"Timed out waiting for RayJob {name} deployment to become available." f"Status: {status}" + ) + + time.sleep(poll_interval) + + while True: + status = self.get_status(name, namespace, timeout, poll_interval).get("jobStatus") + + if status: + break + + if time.time() - start_time > timeout: + raise TimeoutError(f"Timed out waiting for RayJob {name} to start. " f"Status: {status}") + + time.sleep(poll_interval) + + return True + + def _wait_for_job_submission( + self, + name: str, + namespace: str, + timeout: int = 600, + poll_interval: int = 10, + ): + start_time = time.time() + + while True: + status = self.get_status(name, namespace) + if status.get("jobDeploymentStatus") in ["Complete", "Failed"]: + return + + if (job_status := status.get("jobStatus")) is not None: + if job_status != "PENDING": + return + + if time.time() - start_time > timeout: + raise TimeoutError(f"Timed out waiting for job {name} to start") + + logger.debug(f"RayJob {namespace}/{name} deployment status is {job_status}, waiting for it to start...") + + time.sleep(poll_interval) + + def get_job_logs(self, name: str, namespace: str, timeout: int = 60 * 60, port_forward: bool = False) -> str: + self._wait_for_job_submission(name, namespace, timeout=timeout) + with self.ray_cluster_client.job_submission_client( + name=self.get_ray_cluster_name(name, namespace), namespace=namespace, port_forward=port_forward + ) as job_submission_client: + return job_submission_client.get_job_logs(job_id=self.get_job_sumission_id(name, namespace)) + + def tail_job_logs( + self, name: str, namespace: str, timeout: int = 60 * 60, port_forward: bool = False + ) -> Iterator[str]: + import asyncio + + self._wait_for_job_submission(name, namespace, timeout=timeout) + with self.ray_cluster_client.job_submission_client( + name=self.get_ray_cluster_name(name, namespace), namespace=namespace, port_forward=port_forward + ) as job_submission_client: + async_tailer = job_submission_client.tail_job_logs(job_id=self.get_job_sumission_id(name, namespace)) + + # Backward compatible sync generator + def tail_logs() -> Iterator[str]: + while True: + try: + yield asyncio.get_event_loop().run_until_complete(async_tailer.__anext__()) # type: ignore + except StopAsyncIteration: + break + + yield from tail_logs() + + def terminate(self, name: str, namespace: str, port_forward: bool = False) -> bool: + """ + Unlike the .delete method, this won't remove the Kubernetes object, but will instead stop the Ray Job. + """ + with self.ray_cluster_client.job_submission_client( + name=self.get_ray_cluster_name(name, namespace), namespace=namespace, port_forward=port_forward + ) as job_submission_client: + job_id = self.get_job_sumission_id(name, namespace) + + job_submitted = False + + while not job_submitted: + jobs = job_submission_client.list_jobs() + + for job in jobs: + if job.submission_id == job_id: + job_submitted = True + break + + logger.debug( + f"Trying to terminate job {name}, but it wasn't submitted yet. Waiting for it to be submitted..." 
+ ) + time.sleep(10) + + return job_submission_client.stop_job(job_id=job_id) diff --git a/dagster_ray/kuberay/configs.py b/dagster_ray/kuberay/configs.py index e7a2e81..94277b8 100644 --- a/dagster_ray/kuberay/configs.py +++ b/dagster_ray/kuberay/configs.py @@ -1,10 +1,12 @@ import os -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Literal, Optional from dagster import Config +from pydantic import Field in_k8s = os.environ.get("KUBERNETES_SERVICE_HOST") is not None -IS_PROD = os.getenv("DAGSTER_CLOUD_DEPLOYMENT_NAME") == "prod" + + DEFAULT_AUTOSCALER_OPTIONS = { "upscalingMode": "Default", "idleTimeoutSeconds": 60, @@ -77,15 +79,23 @@ class RayClusterConfig(Config): image: Optional[str] = None - namespace: str = "kuberay" + namespace: str = "ray" enable_in_tree_autoscaling: bool = False autoscaler_options: Dict[str, Any] = DEFAULT_AUTOSCALER_OPTIONS # TODO: add a dedicated Config type head_group_spec: Dict[str, Any] = DEFAULT_HEAD_GROUP_SPEC # TODO: add a dedicated Config type worker_group_specs: List[Dict[str, Any]] = DEFAULT_WORKER_GROUP_SPECS # TODO: add a dedicated Config type -DEFAULT_DEPLOYMENT_NAME = ( - os.getenv("DAGSTER_CLOUD_DEPLOYMENT_NAME") - if os.getenv("DAGSTER_CLOUD_IS_BRANCH_DEPLOYMENT") == "0" - else os.getenv("DAGSTER_CLOUD_GIT_BRANCH") -) or "dev" +class RayJobConfig(Config): + entrypoint_num_cpus: float + entrypoint_memory: float + entrypoint_num_gpus: int + suspend: bool = False + annotations: Optional[dict[str, str]] = None + labels: Optional[dict[str, str]] = None + shutdown_after_job_finishes: bool = True + ttl_seconds_after_finished: int = 60 * 10 # 10 minutes + active_deadline_seconds: int = 60 * 60 * 24 # 24 hours + submission_mode: Literal["K8sJobMode", "HTTPMode"] = "K8sJobMode" + runtime_env_yaml: Optional[str] = None + cluster: RayClusterConfig = Field(default_factory=RayClusterConfig) diff --git a/dagster_ray/kuberay/constants.py b/dagster_ray/kuberay/constants.py deleted file mode 100644 index ddf26d9..0000000 --- a/dagster_ray/kuberay/constants.py +++ /dev/null @@ -1,10 +0,0 @@ -import logging - -# Group, Version, Plural -GROUP = "ray.io" -VERSION = "v1alpha1" -PLURAL = "rayclusters" -KIND = "RayCluster" - -# log level -LOGLEVEL = logging.INFO diff --git a/dagster_ray/kuberay/jobs.py b/dagster_ray/kuberay/jobs.py index 59e2f77..02ad2ad 100644 --- a/dagster_ray/kuberay/jobs.py +++ b/dagster_ray/kuberay/jobs.py @@ -3,14 +3,14 @@ from dagster_ray.kuberay.ops import cleanup_kuberay_clusters_op, delete_kuberay_clusters_op -@job(description="Deletes KubeRay clusters from Kubernetes", name="delete_kuberay_clusters") +@job(description="Deletes RayCluster resources from Kubernetes", name="delete_kuberay_rayclusters") def delete_kuberay_clusters(): delete_kuberay_clusters_op() @job( - description="Deletes KubeRay clusters which do not correspond to any active Dagster Runs in this deployment", - name="cleanup_kuberay_clusters", + description="Deletes RayCluster resources which do not correspond to any active Dagster Runs in this deployment from Kubernetes", + name="cleanup_kuberay_rayclusters", ) def cleanup_kuberay_clusters(): cleanup_kuberay_clusters_op() diff --git a/dagster_ray/kuberay/ops.py b/dagster_ray/kuberay/ops.py index cc5ac6b..a964237 100644 --- a/dagster_ray/kuberay/ops.py +++ b/dagster_ray/kuberay/ops.py @@ -3,25 +3,25 @@ from dagster import Config, DagsterRunStatus, OpExecutionContext, RunsFilter, op from pydantic import Field -from dagster_ray.kuberay.configs import DEFAULT_DEPLOYMENT_NAME -from 
dagster_ray.kuberay.resources import KubeRayAPI +from dagster_ray._base.constants import DEFAULT_DEPLOYMENT_NAME +from dagster_ray.kuberay.resources import RayClusterClientResource class DeleteKubeRayClustersConfig(Config): namespace: str = "kuberay" - cluster_names: List[str] = Field(default_factory=list, description="Specific RayCluster names to delete") + cluster_names: List[str] = Field(default_factory=list, description="List of RayCluster names to delete") -@op(description="Deletes KubeRay clusters from Kubernetes", name="delete_kuberay_clusters") +@op(description="Deletes RayCluster resources from Kubernetes", name="delete_kuberay_clusters") def delete_kuberay_clusters_op( context: OpExecutionContext, config: DeleteKubeRayClustersConfig, - kuberay_api: KubeRayAPI, + client: RayClusterClientResource, ) -> None: for cluster_name in config.cluster_names: try: - if kuberay_api.kuberay.get_ray_cluster(name=cluster_name, k8s_namespace=config.namespace).get("items"): - kuberay_api.kuberay.delete_ray_cluster(name=cluster_name, k8s_namespace=config.namespace) + if client.client.get(name=cluster_name, namespace=config.namespace).get("items"): + client.client.delete(name=cluster_name, namespace=config.namespace) context.log.info(f"RayCluster {config.namespace}/{cluster_name} deleted!") else: context.log.warning(f"RayCluster {config.namespace}/{cluster_name} doesn't exist") @@ -37,13 +37,13 @@ class CleanupKuberayClustersConfig(Config): @op( - description="Deletes KubeRay clusters which do not correspond to any active Dagster Runs in this deployment", + description="Deletes RayCluster resources which do not correspond to any active Dagster Runs in this deployment from Kubernetes", name="cleanup_kuberay_clusters", ) def cleanup_kuberay_clusters_op( context: OpExecutionContext, config: CleanupKuberayClustersConfig, - kuberay_api: KubeRayAPI, + client: RayClusterClientResource, ) -> None: current_runs = context.instance.get_runs( filters=RunsFilter( @@ -55,8 +55,8 @@ def cleanup_kuberay_clusters_op( ) ) - clusters = kuberay_api.kuberay.list_ray_clusters( - k8s_namespace=config.namespace, + clusters = client.client.list( + namespace=config.namespace, label_selector=config.label_selector, )["items"] @@ -70,7 +70,7 @@ def cleanup_kuberay_clusters_op( for cluster_name in old_cluster_names: try: - kuberay_api.kuberay.delete_ray_cluster(name=cluster_name, k8s_namespace=config.namespace) + client.client.delete(name=cluster_name, namespace=config.namespace) context.log.info(f"RayCluster {config.namespace}/{cluster_name} deleted!") except: # noqa context.log.exception(f"Couldn't delete RayCluster {config.namespace}/{cluster_name}") diff --git a/dagster_ray/kuberay/pipes.py b/dagster_ray/kuberay/pipes.py new file mode 100644 index 0000000..720b702 --- /dev/null +++ b/dagster_ray/kuberay/pipes.py @@ -0,0 +1,212 @@ +import time +from typing import TYPE_CHECKING, Any, Dict, Optional, cast + +import dagster._check as check +import yaml +from dagster import DagsterInvariantViolationError, OpExecutionContext, PipesClient +from dagster._annotations import experimental +from dagster._core.definitions.resource_annotation import TreatAsResourceParam +from dagster._core.errors import DagsterExecutionInterruptedError +from dagster._core.pipes.client import ( + PipesClientCompletedInvocation, + PipesContextInjector, + PipesMessageReader, +) +from dagster._core.pipes.context import PipesSession +from dagster._core.pipes.utils import PipesEnvContextInjector, open_pipes_session +from dagster_k8s.pipes import 
PipesK8sPodLogsMessageReader + +from dagster_ray.kuberay.client import RayJobClient +from dagster_ray.kuberay.client.rayjob.client import RayJobStatus +from dagster_ray.pipes import PipesRayJobSubmissionClientMessageReader + +if TYPE_CHECKING: + from ray.job_submission import JobSubmissionClient + + +@experimental +class PipesRayJobClient(PipesClient, TreatAsResourceParam): + """A pipes client for running ``RayJob`` on Kubernetes. + + Args: + context_injector (Optional[PipesContextInjector]): A context injector to use to inject + context into the ``RayJob``. Defaults to :py:class:`PipesEnvContextInjector`. + message_reader (Optional[PipesMessageReader]): A message reader to use to read messages + from the glue job run. Defaults to :py:class:`PipesRayJobSubmissionClientMessageReader`. + client (Optional[boto3.client]): The Kubernetes API client. + forward_termination (bool): Whether to cancel the `RayJob` job run when the Dagster process receives a termination signal. + timeout (int): Timeout for various internal interactions with the Kubernetes RayJob. + poll_interval (int): Interval at which to poll the Kubernetes for status updates. + port_forward (bool): Whether to use Kubernetes port-forwarding to connect to the KubeRay cluster. + Is useful when running in a local environment. + """ + + client: RayJobClient + + def __init__( + self, + client: Optional[RayJobClient] = None, + context_injector: Optional[PipesContextInjector] = None, + message_reader: Optional[PipesMessageReader] = None, + forward_termination: bool = True, + timeout: int = 600, + poll_interval: int = 5, + port_forward: bool = False, + ): + self.client: RayJobClient = client or RayJobClient() + + self._context_injector = context_injector or PipesEnvContextInjector() + self._message_reader = message_reader or PipesK8sPodLogsMessageReader() + + self.forward_termination = check.bool_param(forward_termination, "forward_termination") + self.timeout = check.int_param(timeout, "timeout") + self.poll_interval = check.int_param(poll_interval, "poll_interval") + self.port_forward = check.bool_param(port_forward, "port_forward") + + self._job_submission_client: Optional["JobSubmissionClient"] = None + + @property + def job_submission_client(self) -> "JobSubmissionClient": + if self._job_submission_client is None: + raise DagsterInvariantViolationError("JobSubmissionClient is not available inside the run method.") + else: + return self._job_submission_client + + def run( + self, + *, + context: OpExecutionContext, + ray_job: Dict[str, Any], + extras: Optional[Dict[str, Any]] = None, + ) -> PipesClientCompletedInvocation: + """ + Execute a RayJob, enriched with the Pipes protocol. + + Args: + context (OpExecutionContext): Current Dagster op or asset context. + ray_job (Dict[str, Any]): RayJob specification. `API reference `_. + extras (Optional[Dict[str, Any]]): Additional information to pass to the Pipes session. 
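To make the expected manifest shape concrete, here is a hedged sketch of an asset invoking `run()`; the namespace, entrypoint, and cluster spec below are hypothetical placeholders, and the full schema is in the KubeRay `RayJob` API reference:

```python
import yaml
from dagster import AssetExecutionContext, asset

from dagster_ray.kuberay import PipesRayJobClient


@asset
def my_ray_asset(context: AssetExecutionContext, pipes_ray_job_client: PipesRayJobClient):
    ray_job = {
        "metadata": {"namespace": "ray"},  # .metadata.name is generated if omitted
        "spec": {
            "entrypoint": "python my_script.py",
            # ship dagster-pipes to the job so it can report events back
            "runtimeEnvYAML": yaml.safe_dump({"pip": ["dagster-pipes"]}),
            "rayClusterSpec": {
                # headGroupSpec / workerGroupSpecs go here, as in a RayCluster manifest
            },
        },
    }
    return pipes_ray_job_client.run(
        context=context,
        ray_job=ray_job,
        extras={"foo": "bar"},
    ).get_materialize_result()
```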
+ """ + with open_pipes_session( + context=context, + message_reader=self.message_reader, + context_injector=self.context_injector, + extras=extras, + ) as session: + ray_job = self._enrich_ray_job(context, session, ray_job) + start_response = self._start(context, ray_job) + + name = ray_job["metadata"]["name"] + namespace = ray_job["metadata"]["namespace"] + + with self.client.ray_cluster_client.job_submission_client( + port_forward=self.port_forward + ) as job_submission_client: + self._job_submission_client = job_submission_client + + try: + self._read_messages(context, start_response) + self._wait_for_completion(context, start_response) + return PipesClientCompletedInvocation(session) + + except DagsterExecutionInterruptedError: + if self.forward_termination: + context.log.warning( + f"[pipes] Dagster process interrupted! Will terminate RayJob {namespace}/{name}." + ) + self._terminate(context, start_response) + raise + + def _enrich_ray_job( + self, context: OpExecutionContext, session: PipesSession, ray_job: Dict[str, Any] + ) -> Dict[str, Any]: + env_vars = session.get_bootstrap_env_vars() + + ray_job["metadata"] = ray_job.get("metadata", {}) + ray_job["metadata"]["labels"] = ray_job["metadata"].get("labels", {}) + + ray_job["metadata"]["name"] = ray_job["metadata"].get("name", f"dg-{context.run_id[:6]}") + ray_job["metadata"]["labels"].update(self.get_dagster_tags(context)) + + # update env vars in runtimeEnv + runtime_env_yaml = ray_job["spec"].get("runtimeEnvYAML") + + if runtime_env_yaml is None: + runtime_env_yaml = yaml.safe_dump( + { + "env_vars": env_vars, + } + ) + else: + runtime_env = yaml.safe_load(runtime_env_yaml) + runtime_env["env_vars"] = runtime_env.get("env_vars", {}) + runtime_env["env_vars"].update(env_vars) + + ray_job["spec"]["runtimeEnvYAML"] = yaml.safe_dump(runtime_env) + + return ray_job + + def _start(self, context: OpExecutionContext, ray_job: Dict[str, Any]) -> Dict[str, Any]: + name = ray_job["metadata"]["name"] + namespace = ray_job["metadata"]["namespace"] + + context.log.info(f"[pipes] Starting RayJob {namespace}/{name}...") + + self.client.create( + k8s_namespace=ray_job["metadata"]["namespace"], + ray_job=ray_job, + ) + + self.client.wait_until_running( + name=name, + namespace=namespace, + timeout=self.timeout, + poll_interval=self.poll_interval, + ) + + return self.client.get_ray_job( + k8s_namespace=ray_job["metadata"]["namespace"], + ray_job_name=ray_job["metadata"]["name"], + ) + + def _read_messages(self, context: OpExecutionContext, start_response: Dict[str, Any]) -> None: + status = cast(RayJobStatus, start_response["status"]) + + if isinstance(self._message_reader, PipesRayJobSubmissionClientMessageReader): + # starts a thread + self._message_reader.tail_job_logs(client=self.job_submission_client, job_id=status["jobId"]) + + def _wait_for_completion(self, context: OpExecutionContext, start_response: Dict[str, Any]) -> Dict[str, Any]: + context.log.info("[pipes] Waiting for RayJob to complete...") + + name = start_response["metadata"]["name"] + namespace = start_response["metadata"]["namespace"] + + while True: + status = self.client.get_status( + name=name, + namespace=namespace, + ) + + if job_status := status.get("jobStatus"): + if job_status in ["PENDING", "RUNNING"]: + pass + elif job_status == "SUCCEEDED": + context.log.info(f"[pipes] RayJob {namespace}/{name} is complete!") + return status + elif job_status == ["STOPPED", "FAILED"]: + raise RuntimeError( + f"RayJob {namespace}/{name} status is {job_status}. 
Reason:\n{status.get('message')}" + ) + else: + raise RuntimeError(f"RayJob {namespace}/{name} has an unknown status: {job_status}") + + time.sleep(self.poll_interval) + + def _terminate(self, context: OpExecutionContext, start_response: Dict[str, Any]) -> None: + name = start_response["metadata"]["name"] + namespace = start_response["metadata"]["namespace"] + + context.log.info(f"[pipes] Terminating RayJob {namespace}/{name} ...") + self.client.terminate(name=name, namespace=namespace, port_forward=self.port_forward) + context.log.info(f"[pipes] RayJob {namespace}/{name} terminated.") diff --git a/dagster_ray/kuberay/ray_cluster_api.py b/dagster_ray/kuberay/ray_cluster_api.py deleted file mode 100644 index 3ac74f6..0000000 --- a/dagster_ray/kuberay/ray_cluster_api.py +++ /dev/null @@ -1,251 +0,0 @@ -import logging -import time -from typing import Any, Optional - -from kubernetes import client, config -from kubernetes.client import ApiException - -import dagster_ray.kuberay.constants as constants - -log = logging.getLogger(__name__) - - -if logging.getLevelName(log.level) == "NOTSET": - logging.basicConfig(format="%(asctime)s %(message)s", level=constants.LOGLEVEL) - - -class RayClusterApi: - """ - Taken from https://github.com/ray-project/kuberay/blob/master/clients/python-client/python_client/kuberay_cluster_api.py - This project is not published to PyPI, so it's impossible to install it as a dependency. - - List of modifications to the original RayClusterApi: - - allow passing config_file and context to __init__\ - - fixed get_ray_cluster_status hard-querying 'status' field which is not always present - - RayClusterApi provides APIs to list, get, create, build, update, delete rayclusters. - - Methods: - - list_ray_clusters(k8s_namespace: str = "default", async_req: bool = False) -> Any: - - get_ray_cluster(name: str, k8s_namespace: str = "default") -> Any: - - create_ray_cluster(body: Any, k8s_namespace: str = "default") -> Any: - - delete_ray_cluster(name: str, k8s_namespace: str = "default") -> bool: - - patch_ray_cluster(name: str, ray_patch: Any, k8s_namespace: str = "default") -> Any: - """ - - def __init__(self, config_file: Optional[str], context: Optional[str] = None): - self.kube_config: Optional[Any] = config.load_kube_config(config_file=config_file, context=context) - self.api = client.CustomObjectsApi() - self.core_v1_api = client.CoreV1Api() - - def get_ray_cluster_status( - self, name: str, k8s_namespace: str = "default", timeout: int = 60, delay_between_attempts: int = 5 - ) -> Any: - while timeout > 0: - try: - resource: Any = self.api.get_namespaced_custom_object_status( - group=constants.GROUP, - version=constants.VERSION, - plural=constants.PLURAL, - name=name, - namespace=k8s_namespace, - ) - except ApiException as e: - if e.status == 404: - log.error("raycluster resource is not found. error = {}".format(e)) - return None - else: - log.error("error fetching custom resource: {}".format(e)) - return None - - if resource.get("status"): # <- fixed here, was ["status"] - return resource["status"] - else: - log.info("raycluster {} status not set yet, waiting...".format(name)) - time.sleep(delay_between_attempts) - timeout -= delay_between_attempts - - log.info("raycluster {} status not set yet, timing out...".format(name)) - return None - - def __del__(self): - self.api = None - self.kube_config = None - - def list_ray_clusters( - self, k8s_namespace: str = "default", label_selector: str = "", async_req: bool = False - ) -> Any: - """List Ray clusters in a given namespace. 
- - Parameters: - - k8s_namespace (str, optional): The namespace in which to list the Ray clusters. Defaults to "default". - - async_req (bool, optional): Whether to make the request asynchronously. Defaults to False. - - Returns: - Any: The custom resource for Ray clusters in the specified namespace, or None if not found. - - Raises: - ApiException: If there was an error fetching the custom resource. - """ - try: - resource: Any = self.api.list_namespaced_custom_object( - group=constants.GROUP, - version=constants.VERSION, - plural=constants.PLURAL, - namespace=k8s_namespace, - label_selector=label_selector, - async_req=async_req, - ) - if "items" in resource: - return resource - return None - except ApiException as e: - if e.status == 404: - log.error("raycluster resource is not found. error = {}".format(e)) - return None - else: - log.error("error fetching custom resource: {}".format(e)) - return None - - def get_ray_cluster(self, name: str, k8s_namespace: str = "default") -> Any: - """Get a specific Ray cluster in a given namespace. - - Parameters: - - name (str): The name of the Ray cluster custom resource. Defaults to "". - - k8s_namespace (str, optional): The namespace in which to retrieve the Ray cluster. Defaults to "default". - - Returns: - Any: The custom resource for the specified Ray cluster, or None if not found. - - Raises: - ApiException: If there was an error fetching the custom resource. - """ - try: - resource: Any = self.api.get_namespaced_custom_object( - group=constants.GROUP, - version=constants.VERSION, - plural=constants.PLURAL, - name=name, - namespace=k8s_namespace, - ) - return resource - except ApiException as e: - if e.status == 404: - log.error("raycluster resource is not found. error = {}".format(e)) - return None - else: - log.error("error fetching custom resource: {}".format(e)) - return None - - def wait_until_ray_cluster_running( - self, name: str, k8s_namespace: str = "default", timeout: int = 60, delay_between_attempts: int = 5 - ) -> bool: - """Get a specific Ray cluster in a given namespace. - - Parameters: - - name (str): The name of the Ray cluster custom resource. Defaults to "". - - k8s_namespace (str, optional): The namespace in which to retrieve the Ray cluster. Defaults to "default". - - timeout (int, optional): The duration in seconds after which we stop trying to get status. Defaults to 60 seconds. - - delay_between_attempts (int, optional): The duration in seconds to wait between attempts to get status if not set. Defaults to 5 seconds. - - - - Returns: - Bool: True if the raycluster status is Running, False otherwise. - - """ - status = self.get_ray_cluster_status(name, k8s_namespace, timeout, delay_between_attempts) - - # TODO: once we add State to Status, we should check for that as well - if status and status["head"] and status["head"]["serviceIP"]: - return True - - log.info( - "raycluster {} status is not running yet, current status is {}".format( - name, status["state"] if status else "unknown" - ) - ) - return False - - def create_ray_cluster(self, body: Any, k8s_namespace: str = "default") -> Any: - """Create a new Ray cluster custom resource. - - Parameters: - - body (Any): The data of the custom resource to create. - - k8s_namespace (str, optional): The namespace in which to create the custom resource. Defaults to "default". - - Returns: - Any: The created custom resource, or None if it already exists or there was an error. 
- """ - try: - resource: Any = self.api.create_namespaced_custom_object( - group=constants.GROUP, - version=constants.VERSION, - plural=constants.PLURAL, - body=body, - namespace=k8s_namespace, - ) - return resource - except ApiException as e: - if e.status == 409: - log.error("raycluster resource already exists. error = {}".format(e.reason)) - return None - else: - log.error("error creating custom resource: {}".format(e)) - return None - - def delete_ray_cluster(self, name: str, k8s_namespace: str = "default") -> bool: - """Delete a Ray cluster custom resource. - - Parameters: - - name (str): The name of the Ray cluster custom resource to delete. - - k8s_namespace (str, optional): The namespace in which the Ray cluster exists. Defaults to "default". - - Returns: - Any: The deleted custom resource, or None if already deleted or there was an error. - """ - try: - resource: Any = self.api.delete_namespaced_custom_object( - group=constants.GROUP, - version=constants.VERSION, - plural=constants.PLURAL, - name=name, - namespace=k8s_namespace, - ) - return resource - except ApiException as e: - if e.status == 404: - log.error("raycluster custom resource already deleted. error = {}".format(e.reason)) - return None - else: - log.error("error deleting the raycluster custom resource: {}".format(e.reason)) - return None - - def patch_ray_cluster(self, name: str, ray_patch: Any, k8s_namespace: str = "default") -> Any: - """Patch an existing Ray cluster custom resource. - - Parameters: - - name (str): The name of the Ray cluster custom resource to be patched. - - ray_patch (Any): The patch data for the Ray cluster. - - k8s_namespace (str, optional): The namespace in which the Ray cluster exists. Defaults to "default". - - Returns: - bool: True if the patch was successful, False otherwise. 
- """ - try: - # we patch the existing raycluster with the new config - self.api.patch_namespaced_custom_object( - group=constants.GROUP, - version=constants.VERSION, - plural=constants.PLURAL, - name=name, - body=ray_patch, - namespace=k8s_namespace, - ) - except ApiException as e: - log.error("raycluster `{}` failed to patch, with error: {}".format(name, e)) - return False - else: - log.info("raycluster `%s` is patched successfully", name) - - return True diff --git a/dagster_ray/kuberay/resources.py b/dagster_ray/kuberay/resources.py index d08e624..06b9521 100644 --- a/dagster_ray/kuberay/resources.py +++ b/dagster_ray/kuberay/resources.py @@ -1,6 +1,5 @@ import contextlib import hashlib -import os import random import re import string @@ -8,14 +7,17 @@ from typing import Any, Dict, Generator, Optional, cast import dagster._check as check +import kubernetes from dagster import ConfigurableResource, InitResourceContext -from kubernetes import client, config, watch +from dagster._annotations import experimental from pydantic import Field, PrivateAttr +from dagster_ray._base.constants import DEFAULT_DEPLOYMENT_NAME +from dagster_ray.kuberay.client import RayClusterClient + # yes, `python-client` is actually the KubeRay package name # https://github.com/ray-project/kuberay/issues/2078 -from dagster_ray.kuberay.configs import DEFAULT_DEPLOYMENT_NAME, RayClusterConfig -from dagster_ray.kuberay.ray_cluster_api import RayClusterApi +from dagster_ray.kuberay.configs import RayClusterConfig if sys.version_info >= (3, 11): from typing import Self @@ -26,48 +28,45 @@ from ray._private.worker import BaseContext as RayBaseContext # noqa from dagster_ray._base.resources import BaseRayResource +from dagster_ray.kuberay.client.base import load_kubeconfig -class KubeRayAPI(ConfigurableResource): +@experimental +class RayClusterClientResource(ConfigurableResource): + kube_context: Optional[str] = None kubeconfig_file: Optional[str] = None - _kuberay_api: RayClusterApi = PrivateAttr() - _k8s_api: client.CustomObjectsApi = PrivateAttr() - _k8s_core_api: client.CoreV1Api = PrivateAttr() + _raycluster_client: RayClusterClient = PrivateAttr() + _k8s_api: kubernetes.client.CustomObjectsApi = PrivateAttr() + _k8s_core_api: kubernetes.client.CoreV1Api = PrivateAttr() @property - def kuberay(self) -> RayClusterApi: - if self._kuberay_api is None: - raise ValueError("KubeRayAPI not initialized") - return self._kuberay_api + def client(self) -> RayClusterClient: + if self._raycluster_client is None: + raise ValueError(f"{self.__class__.__name__} not initialized") + return self._raycluster_client @property - def k8s(self) -> client.CustomObjectsApi: + def k8s(self) -> kubernetes.client.CustomObjectsApi: if self._k8s_api is None: - raise ValueError("KubeRayAPI not initialized") + raise ValueError(f"{self.__class__.__name__} not initialized") return self._k8s_api @property - def k8s_core(self) -> client.CoreV1Api: + def k8s_core(self) -> kubernetes.client.CoreV1Api: if self._k8s_core_api is None: - raise ValueError("KubeRayAPI not initialized") + raise ValueError(f"{self.__class__.__name__} not initialized") return self._k8s_core_api def setup_for_execution(self, context: InitResourceContext) -> None: - self._load_kubeconfig(self.kubeconfig_file) - - self._kuberay_api = RayClusterApi(config_file=self.kubeconfig_file) - self._k8s_api = client.CustomObjectsApi() - self._k8s_core_api = client.CoreV1Api() + load_kubeconfig(context=self.kube_context, config_file=self.kubeconfig_file) - @staticmethod - def 
_load_kubeconfig(kubeconfig_file: Optional[str] = None): - try: - config.load_kube_config(config_file=kubeconfig_file) - except config.config_exception.ConfigException: - config.load_incluster_config() + self._raycluster_client = RayClusterClient(context=self.kube_context, config_file=self.kubeconfig_file) + self._k8s_api = kubernetes.client.CustomObjectsApi() + self._k8s_core_api = kubernetes.client.CoreV1Api() +@experimental class KubeRayCluster(BaseRayResource): """ Provides a RayCluster for the current step selection @@ -76,14 +75,14 @@ class KubeRayCluster(BaseRayResource): deployment_name: str = Field( default=DEFAULT_DEPLOYMENT_NAME, - description="Prefix for the RayCluster name. It's recommended to match it with the Dagster deployment name. " - "Dagster Cloud variables are used to determine the default value.", + description="Prefix for the RayCluster name. Dagster Cloud variables are used to determine the default value.", ) ray_cluster: RayClusterConfig = Field(default_factory=RayClusterConfig) skip_cleanup: bool = False skip_init: bool = False + timeout: int = Field(default=600, description="Timeout in seconds for the RayCluster to become ready") - api: KubeRayAPI = Field(default_factory=KubeRayAPI) + client: RayClusterClientResource = Field(default_factory=RayClusterClientResource) _cluster_name: str = PrivateAttr() _host: str = PrivateAttr() @@ -91,7 +90,7 @@ class KubeRayCluster(BaseRayResource): @property def host(self) -> str: if self._host is None: - raise ValueError("RayClusterResource not initialized") + raise ValueError(f"{self.__class__.__name__} not initialized") return self._host @property @@ -101,15 +100,20 @@ def namespace(self) -> str: @property def cluster_name(self) -> str: if self._cluster_name is None: - raise ValueError("RayClusterResource not initialized") + raise ValueError(f"{self.__class__.__name__}not initialized") return self._cluster_name + def get_dagster_tags(self, context: InitResourceContext) -> Dict[str, str]: + tags = super().get_dagster_tags(context=context) + tags.update({"dagster.io/cluster": self.cluster_name, "dagster.io/deployment": self.deployment_name}) + return tags + @contextlib.contextmanager def yield_for_execution(self, context: InitResourceContext) -> Generator[Self, None, None]: assert context.log is not None assert context.dagster_run is not None - self.api.setup_for_execution(context) + self.client.setup_for_execution(context) self._cluster_name = self._get_ray_cluster_step_name(context) @@ -117,20 +121,18 @@ def yield_for_execution(self, context: InitResourceContext) -> Generator[Self, N try: # just a safety measure, no need to recreate the cluster for step retries or smth - if not self.api.kuberay.list_ray_clusters( - k8s_namespace=self.namespace, + if not self.client.client.list( + namespace=self.namespace, label_selector=f"dagster.io/cluster={self.cluster_name}", )["items"]: cluster_body = self._build_raycluster( image=(self.ray_cluster.image or context.dagster_run.tags["dagster/image"]), - labels=self._get_labels(context), + labels=self.get_dagster_tags(context), ) - resource = self.api.kuberay.create_ray_cluster(body=cluster_body, k8s_namespace=self.namespace) - if resource is None: - raise Exception( - f"Couldn't create RayCluster {self.namespace}/{self.cluster_name}! Reason logged above." 
- ) + resource = self.client.client.create(body=cluster_body, namespace=self.namespace) + if not resource: + raise RuntimeError(f"Couldn't create RayCluster {self.namespace}/{self.cluster_name}") context.log.info( f"Created RayCluster {self.namespace}/{self.cluster_name}. Waiting for it to become ready..." @@ -138,12 +140,9 @@ def yield_for_execution(self, context: InitResourceContext) -> Generator[Self, N self._wait_raycluster_ready() - # TODO: currently this will only work from withing the cluster - # find a way to make it work from outside - # probably would need a LoadBalancer/Ingress - self._host = self.api.kuberay.get_ray_cluster(name=self.cluster_name, k8s_namespace=self.namespace)[ - "status" - ]["head"]["serviceIP"] + self._host = self.client.client.get_status(name=self.cluster_name, namespace=self.namespace)[ # type: ignore + "head" + ]["serviceIP"] context.log.info("RayCluster is ready! Connection command:") @@ -168,27 +167,6 @@ def yield_for_execution(self, context: InitResourceContext) -> Generator[Self, N if self._context is not None: self._context.disconnect() - def _get_labels(self, context: InitResourceContext) -> Dict[str, str]: - assert context.dagster_run is not None - - labels = { - "dagster.io/run_id": cast(str, context.run_id), - "dagster.io/cluster": self.cluster_name, - "dagster.io/deployment": self.deployment_name, - # TODO: add more labels - } - - if context.dagster_run.tags.get("user"): - labels["dagster.io/user"] = context.dagster_run.tags["user"] - - if os.getenv("DAGSTER_CLOUD_GIT_BRANCH"): - labels["dagster.io/git-branch"] = os.environ["DAGSTER_CLOUD_GIT_BRANCH"] - - if os.getenv("DAGSTER_CLOUD_GIT_SHA"): - labels["dagster.io/git-sha"] = os.environ["DAGSTER_CLOUD_GIT_SHA"] - - return labels - def _build_raycluster( self, image: str, @@ -240,16 +218,14 @@ def update_group_spec(group_spec: Dict[str, Any]): } def _wait_raycluster_ready(self): - if not self.api.kuberay.wait_until_ray_cluster_running(self.cluster_name, k8s_namespace=self.namespace): - status = self.api.kuberay.get_ray_cluster_status(self.cluster_name, k8s_namespace=self.namespace) - raise Exception(f"RayCluster {self.namespace}/{self.cluster_name} failed to start: {status}") + self.client.client.wait_until_ready(self.cluster_name, namespace=self.namespace, timeout=self.timeout) # the above code only checks for RayCluster creation # not for head pod readiness - w = watch.Watch() + w = kubernetes.watch.Watch() for event in w.stream( - func=self.api.k8s_core.list_namespaced_pod, + func=self.client.k8s_core.list_namespaced_pod, namespace=self.namespace, label_selector=f"ray.io/cluster={self.cluster_name},ray.io/group=headgroup", timeout_seconds=60, @@ -267,7 +243,7 @@ def _maybe_cleanup_raycluster(self, context: InitResourceContext): assert context.log is not None if not self.skip_cleanup and cast(DagsterRun, context.dagster_run).status != DagsterRunStatus.FAILURE: - self.api.kuberay.delete_ray_cluster(self.cluster_name, k8s_namespace=self.namespace) + self.client.client.delete(self.cluster_name, namespace=self.namespace) context.log.info(f"Deleted RayCluster {self.namespace}/{self.cluster_name}") else: context.log.warning( @@ -281,8 +257,7 @@ def _get_ray_cluster_step_name(self, context: InitResourceContext) -> str: assert context.dagster_run is not None # try to make the name as short as possible - - cluster_name_prefix = f"dr-{self.deployment_name.replace('-', '')[:8]}-{context.run_id[:8]}" + cluster_name_prefix = f"dg-{self.deployment_name.replace('-', '')[:8]}-{context.run_id[:8]}" 
dagster_user_email = context.dagster_run.tags.get("user") if dagster_user_email is not None: diff --git a/dagster_ray/pipes.py b/dagster_ray/pipes.py new file mode 100644 index 0000000..2a46088 --- /dev/null +++ b/dagster_ray/pipes.py @@ -0,0 +1,85 @@ +import threading +from contextlib import contextmanager +from typing import TYPE_CHECKING, Iterator, Optional + +from dagster._annotations import experimental +from dagster._core.pipes.client import PipesMessageReader +from dagster._core.pipes.context import PipesMessageHandler +from dagster._core.pipes.utils import extract_message_or_forward_to_stdout +from dagster_pipes import PipesParams + +if TYPE_CHECKING: + from ray.job_submission import JobSubmissionClient + + +@experimental +class PipesRayJobSubmissionClientMessageReader(PipesMessageReader): + """ + Dagster Pipes message reader for receiving messages from a Ray job. + Will extract Dagster events and forward the rest to stdout. + """ + + def __init__(self, client: "JobSubmissionClient"): + self.client = client + self._handler: Optional["PipesMessageHandler"] = None + self._thread: Optional[threading.Thread] = None + self._terminate_reading = threading.Event() + + @property + def handler(self) -> "PipesMessageHandler": + if self._handler is None: + raise Exception("PipesMessageHandler is only available while reading messages in open_pipes_session") + + return self._handler + + @property + def thread(self) -> threading.Thread: + if self._thread is None: + raise Exception( + "Message reading thread is not available. " + "Did you call tail_job_logs with blocking=False inside open_pipes_session?" + ) + + return self._thread + + def terminate(self) -> None: + self._terminate_reading.set() + + @contextmanager + def read_messages(self, handler: "PipesMessageHandler") -> Iterator[PipesParams]: + # This method should start a thread to continuously read messages from some location + self._handler = handler + + try: + yield {} + finally: + self._handler = None + if self._thread is not None: + self.terminate() + self.thread.join() + + def tail_job_logs(self, client: "JobSubmissionClient", job_id: str, blocking: bool = False) -> None: + def _thread(): + import asyncio + + async_tailer = client.tail_job_logs(job_id=job_id) + + # Backward compatible sync generator + def tail_logs() -> Iterator[str]: + while True: + try: + yield asyncio.get_event_loop().run_until_complete(async_tailer.__anext__()) # type: ignore + except StopAsyncIteration: + break + + for log_line in tail_logs(): + if self._terminate_reading.is_set(): + break + else: + extract_message_or_forward_to_stdout(handler=self.handler, log_line=log_line) + + if blocking: + _thread() + else: + self._thread = threading.Thread(target=_thread, daemon=True) + self.thread.start() diff --git a/poetry.lock b/poetry.lock index baaf13f..f31c8f3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "aiohttp" @@ -480,31 +480,31 @@ python-dateutil = "*" [[package]] name = "dagit" -version = "1.7.1" +version = "1.8.7" description = "Web UI for dagster."
optional = false python-versions = "<3.13,>=3.8" files = [ - {file = "dagit-1.7.1-py3-none-any.whl", hash = "sha256:31638da09d7ab4efe4d62319f9674b6af82fcf8ccf3c1f867ac80479c11238e1"}, - {file = "dagit-1.7.1.tar.gz", hash = "sha256:59881bc4f6583d30d1fa9e0d6eb5f0e3eda3acdc889103434d89645e66a1d88d"}, + {file = "dagit-1.8.7-py3-none-any.whl", hash = "sha256:dc6b4dc2e8e3ce9d3a255c12a878d1fed884a351564556df8a6a7caa492be53d"}, + {file = "dagit-1.8.7.tar.gz", hash = "sha256:426bdbcd8ade1fc7f9b2d58cb3ed035828be0c50b80e19840b07618b6eca189c"}, ] [package.dependencies] -dagster-webserver = "1.7.1" +dagster-webserver = "1.8.7" [package.extras] -notebook = ["dagster-webserver[notebook] (==1.7.1)"] -test = ["dagster-webserver[test] (==1.7.1)"] +notebook = ["dagster-webserver[notebook] (==1.8.7)"] +test = ["dagster-webserver[test] (==1.8.7)"] [[package]] name = "dagster" -version = "1.7.1" +version = "1.8.7" description = "Dagster is an orchestration platform for the development, production, and observation of data assets." optional = false python-versions = "<3.13,>=3.8" files = [ - {file = "dagster-1.7.1-py3-none-any.whl", hash = "sha256:fa69b137f0db14279bca0c7831a352032148dd44a68c7de5004c59206a08f73d"}, - {file = "dagster-1.7.1.tar.gz", hash = "sha256:f5a947f582f93c1d85b4ce6d80c6365d9d97a248e54fb6822fc3b785e5d0e820"}, + {file = "dagster-1.8.7-py3-none-any.whl", hash = "sha256:7117eb7ba6486377bdb0fb985b82cc86e8c6debc5f60d92e0bbe5bcc02837fa4"}, + {file = "dagster-1.8.7.tar.gz", hash = "sha256:b50c43654a1c3685413fa3a73a1d0eb3f9be6b42d7a3f0b648954faa542facf4"}, ] [package.dependencies] @@ -512,24 +512,19 @@ alembic = ">=1.2.1,<1.6.3 || >1.6.3,<1.7.0 || >1.7.0,<1.11.0 || >1.11.0" click = ">=5.0" coloredlogs = ">=6.1,<=14.0" croniter = ">=0.3.34" -dagster-pipes = "1.7.1" +dagster-pipes = "1.8.7" docstring-parser = "*" +filelock = "*" grpcio = ">=1.44.0" grpcio-health-checking = ">=1.44.0" Jinja2 = "*" packaging = ">=20.9" -pendulum = [ - {version = ">=0.7.0,<3", markers = "python_version < \"3.9\""}, - {version = ">=0.7.0,<4", markers = "python_version >= \"3.9\" and python_version < \"3.12\""}, - {version = ">=3,<4", markers = "python_version >= \"3.12\""}, -] protobuf = [ {version = ">=3.20.0,<5", markers = "python_version < \"3.11\""}, {version = ">=4,<5", markers = "python_version >= \"3.11\""}, ] psutil = {version = ">=1.0", markers = "platform_system == \"Windows\""} -pydantic = ">1.10.0,<1.10.7 || >1.10.7,<3" -python-dateutil = "*" +pydantic = ">1.10.0,<1.10.7 || >1.10.7,<2.10" python-dotenv = "*" pytz = "*" pywin32 = {version = "!=226", markers = "platform_system == \"Windows\""} @@ -544,63 +539,80 @@ tomli = "<3" toposort = ">=1.0" tqdm = "<5" typing-extensions = ">=4.4.0,<5" +tzdata = {version = "*", markers = "platform_system == \"Windows\""} universal-pathlib = [ {version = "*", markers = "python_version < \"3.12\""}, {version = ">=0.2.0", markers = "python_version >= \"3.12\""}, ] -watchdog = ">=0.8.3" +watchdog = ">=0.8.3,<6" [package.extras] docker = ["docker"] mypy = ["mypy (==1.8.0)"] -pyright = ["pandas-stubs", "pyright (==1.1.356)", "types-PyYAML", "types-backports", "types-certifi", "types-chardet", "types-croniter", "types-cryptography", "types-mock", "types-paramiko", "types-pkg-resources", "types-pyOpenSSL", "types-python-dateutil", "types-pytz", "types-requests", "types-simplejson", "types-six", "types-sqlalchemy (==1.4.53.34)", "types-tabulate", "types-toml", "types-tzlocal"] -ruff = ["ruff (==0.3.4)"] -test = ["buildkite-test-collector", "docker", "grpcio-tools (>=1.44.0)", "mock 
(==3.0.5)", "morefs[asynclocal]", "mypy-protobuf", "objgraph", "pytest (>=7.0.1)", "pytest-cov (==2.10.1)", "pytest-mock (==3.3.1)", "pytest-rerunfailures (==10.0)", "pytest-xdist (==3.5.0)", "rapidfuzz", "responses (<=0.23.1)", "syrupy (>=4.0.0)", "tox (==3.25.0)"] +pyright = ["pandas-stubs", "pyright (==1.1.379)", "types-PyYAML", "types-backports", "types-certifi", "types-chardet", "types-croniter", "types-cryptography", "types-mock", "types-paramiko", "types-pyOpenSSL", "types-python-dateutil (>=2.9.0.20240316,<2.10.0.0)", "types-pytz", "types-requests", "types-simplejson", "types-six", "types-sqlalchemy (==1.4.53.34)", "types-tabulate", "types-toml", "types-tzlocal"] +ruff = ["ruff (==0.5.5)"] +test = ["buildkite-test-collector", "docker", "fsspec (<2024.5.0)", "grpcio-tools (>=1.44.0)", "mock (==3.0.5)", "morefs[asynclocal]", "mypy-protobuf", "objgraph", "pytest (>=8)", "pytest-asyncio", "pytest-cov (==5.0.0)", "pytest-mock (==3.14.0)", "pytest-rerunfailures (==14.0)", "pytest-xdist (==3.6.1)", "rapidfuzz", "responses (<=0.23.1)", "syrupy (>=4.0.0)", "tox (>=4)"] [[package]] name = "dagster-graphql" -version = "1.7.1" +version = "1.8.7" description = "The GraphQL frontend to python dagster." optional = false python-versions = "<3.13,>=3.8" files = [ - {file = "dagster-graphql-1.7.1.tar.gz", hash = "sha256:6227904a0fac4affdb70d2d662a6560847a91261a385a082405b4b6264f6a7bb"}, - {file = "dagster_graphql-1.7.1-py3-none-any.whl", hash = "sha256:96f805d4570c457de930d4aa5e3e268f4d222a3768d89517726389ad020d8799"}, + {file = "dagster-graphql-1.8.7.tar.gz", hash = "sha256:d5726f8c840c7a851ecf2d703e201da9866f4e3755941a9a4af8dd5652d8ee78"}, + {file = "dagster_graphql-1.8.7-py3-none-any.whl", hash = "sha256:52ac8091c346110145220fedbeaaeb90b68a4ace3f45b61b76918821cb549350"}, ] [package.dependencies] -dagster = "1.7.1" +dagster = "1.8.7" gql = {version = ">=3,<4", extras = ["requests"]} graphene = ">=3,<4" requests = "*" starlette = "*" +[[package]] +name = "dagster-k8s" +version = "0.24.7" +description = "A Dagster integration for k8s" +optional = false +python-versions = "<3.13,>=3.8" +files = [ + {file = "dagster-k8s-0.24.7.tar.gz", hash = "sha256:22ccee333da8910c597fafc55aca492225f436efb53cd3166f369d3b11133d3f"}, + {file = "dagster_k8s-0.24.7-py3-none-any.whl", hash = "sha256:5012b784eb68e1fa588f26c97f4b262c30a94813a258a666bc01e920504b7b6c"}, +] + +[package.dependencies] +dagster = "1.8.7" +google-auth = "!=2.23.1" +kubernetes = "*" + [[package]] name = "dagster-pipes" -version = "1.7.1" +version = "1.8.7" description = "Toolkit for Dagster integrations with transform logic outside of Dagster" optional = false python-versions = "<3.13,>=3.8" files = [ - {file = "dagster-pipes-1.7.1.tar.gz", hash = "sha256:a113d3e0a4fc979a2147bb51f944f175f634befa028fe45d78e9b5fb076fcaea"}, - {file = "dagster_pipes-1.7.1-py3-none-any.whl", hash = "sha256:03ae08517bfdad4ba3d4875720f215cc76f429ae8242e611ca0b8b3c25cd69a5"}, + {file = "dagster-pipes-1.8.7.tar.gz", hash = "sha256:9808077299bb9c753311926e83c13abfcb5502d6f444291370d2455f188ef98f"}, + {file = "dagster_pipes-1.8.7-py3-none-any.whl", hash = "sha256:ff1bae741a26d37cdacbc524a65d206cbe848d2104beafe96cd32aab2e450fb3"}, ] [[package]] name = "dagster-webserver" -version = "1.7.1" +version = "1.8.7" description = "Web UI for dagster." 
optional = false python-versions = "<3.13,>=3.8" files = [ - {file = "dagster-webserver-1.7.1.tar.gz", hash = "sha256:03bbab1ba8ce0750bb3784fb6e3eed00cbd8dae3dc70ef1cc5490c83abd197a7"}, - {file = "dagster_webserver-1.7.1-py3-none-any.whl", hash = "sha256:2d9eda1ff26c97efd71753582ef75e6b93f9bc45a6e0a5e5ea8036e0f5f0b694"}, + {file = "dagster-webserver-1.8.7.tar.gz", hash = "sha256:3cac7e246eb97c8c1c2ef04e2658763b5ec30e55db9490ee1cdfd35b48c12bb4"}, + {file = "dagster_webserver-1.8.7-py3-none-any.whl", hash = "sha256:958a9847fe8b9895cb23d83b7f3fc6b01c4d639c15a04c8c4c9b3a4c11453eae"}, ] [package.dependencies] click = ">=7.0,<9.0" -dagster = "1.7.1" -dagster-graphql = "1.7.1" +dagster = "1.8.7" +dagster-graphql = "1.8.7" starlette = "!=0.36.0" uvicorn = {version = "*", extras = ["standard"]} @@ -674,6 +686,13 @@ files = [ {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa42a605d099ee7d41ba2b5fb75e21423951fd26e5d50583a00471238fb3021d"}, {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b7764de0d855338abefc6e3ee9fe40d301668310aa3baea3f778ff051f4393"}, {file = "dm_tree-0.1.8-cp311-cp311-win_amd64.whl", hash = "sha256:a5d819c38c03f0bb5b3b3703c60e4b170355a0fc6b5819325bf3d4ceb3ae7e80"}, + {file = "dm_tree-0.1.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ea9e59e0451e7d29aece402d9f908f2e2a80922bcde2ebfd5dcb07750fcbfee8"}, + {file = "dm_tree-0.1.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:94d3f0826311f45ee19b75f5b48c99466e4218a0489e81c0f0167bda50cacf22"}, + {file = "dm_tree-0.1.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:435227cf3c5dc63f4de054cf3d00183790bd9ead4c3623138c74dde7f67f521b"}, + {file = "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09964470f76a5201aff2e8f9b26842976de7889300676f927930f6285e256760"}, + {file = "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:75c5d528bb992981c20793b6b453e91560784215dffb8a5440ba999753c14ceb"}, + {file = "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a94aba18a35457a1b5cd716fd7b46c5dafdc4cf7869b4bae665b91c4682a8e"}, + {file = "dm_tree-0.1.8-cp312-cp312-win_amd64.whl", hash = "sha256:96a548a406a6fb15fe58f6a30a57ff2f2aafbf25f05afab00c8f5e5977b6c715"}, {file = "dm_tree-0.1.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8c60a7eadab64c2278861f56bca320b2720f163dca9d7558103c3b77f2416571"}, {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af4b3d372f2477dcd89a6e717e4a575ca35ccc20cc4454a8a4b6f8838a00672d"}, {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de287fabc464b8734be251e46e06aa9aa1001f34198da2b6ce07bd197172b9cb"}, @@ -1045,6 +1064,7 @@ files = [ {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d967650d3f56af314b72df7089d96cda1083a7fc2da05b375d2bc48c82ab3f3c"}, {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = 
"sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, @@ -1053,6 +1073,7 @@ files = [ {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d4606a527e30548153be1a9f155f4e283d109ffba663a15856089fb55f933e47"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, @@ -1082,6 +1103,7 @@ files = [ {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1087300cf9700bbf455b1b97e24db18f2f77b55302a68272c56209d5587c12d1"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, @@ -1090,6 +1112,7 @@ files = [ {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8512a0c38cfd4e66a858ddd1b17705587900dd760c6003998e9472b77b56d417"}, {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, @@ -1690,6 +1713,16 @@ files = [ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, @@ -2298,139 +2331,6 @@ files = [ {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, ] -[[package]] -name = "pendulum" -version = "2.1.2" -description = "Python datetimes made easy" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "pendulum-2.1.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:b6c352f4bd32dff1ea7066bd31ad0f71f8d8100b9ff709fb343f3b86cee43efe"}, - {file = "pendulum-2.1.2-cp27-cp27m-win_amd64.whl", hash = "sha256:318f72f62e8e23cd6660dbafe1e346950281a9aed144b5c596b2ddabc1d19739"}, - {file = "pendulum-2.1.2-cp35-cp35m-macosx_10_15_x86_64.whl", hash = "sha256:0731f0c661a3cb779d398803655494893c9f581f6488048b3fb629c2342b5394"}, - {file = "pendulum-2.1.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:3481fad1dc3f6f6738bd575a951d3c15d4b4ce7c82dce37cf8ac1483fde6e8b0"}, - {file = 
"pendulum-2.1.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9702069c694306297ed362ce7e3c1ef8404ac8ede39f9b28b7c1a7ad8c3959e3"}, - {file = "pendulum-2.1.2-cp35-cp35m-win_amd64.whl", hash = "sha256:fb53ffa0085002ddd43b6ca61a7b34f2d4d7c3ed66f931fe599e1a531b42af9b"}, - {file = "pendulum-2.1.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:c501749fdd3d6f9e726086bf0cd4437281ed47e7bca132ddb522f86a1645d360"}, - {file = "pendulum-2.1.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:c807a578a532eeb226150d5006f156632df2cc8c5693d778324b43ff8c515dd0"}, - {file = "pendulum-2.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2d1619a721df661e506eff8db8614016f0720ac171fe80dda1333ee44e684087"}, - {file = "pendulum-2.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f888f2d2909a414680a29ae74d0592758f2b9fcdee3549887779cd4055e975db"}, - {file = "pendulum-2.1.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:e95d329384717c7bf627bf27e204bc3b15c8238fa8d9d9781d93712776c14002"}, - {file = "pendulum-2.1.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:4c9c689747f39d0d02a9f94fcee737b34a5773803a64a5fdb046ee9cac7442c5"}, - {file = "pendulum-2.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:1245cd0075a3c6d889f581f6325dd8404aca5884dea7223a5566c38aab94642b"}, - {file = "pendulum-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:db0a40d8bcd27b4fb46676e8eb3c732c67a5a5e6bfab8927028224fbced0b40b"}, - {file = "pendulum-2.1.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f5e236e7730cab1644e1b87aca3d2ff3e375a608542e90fe25685dae46310116"}, - {file = "pendulum-2.1.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:de42ea3e2943171a9e95141f2eecf972480636e8e484ccffaf1e833929e9e052"}, - {file = "pendulum-2.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7c5ec650cb4bec4c63a89a0242cc8c3cebcec92fcfe937c417ba18277d8560be"}, - {file = "pendulum-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:33fb61601083f3eb1d15edeb45274f73c63b3c44a8524703dc143f4212bf3269"}, - {file = "pendulum-2.1.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:29c40a6f2942376185728c9a0347d7c0f07905638c83007e1d262781f1e6953a"}, - {file = "pendulum-2.1.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:94b1fc947bfe38579b28e1cccb36f7e28a15e841f30384b5ad6c5e31055c85d7"}, - {file = "pendulum-2.1.2.tar.gz", hash = "sha256:b06a0ca1bfe41c990bbf0c029f0b6501a7f2ec4e38bfec730712015e8860f207"}, -] - -[package.dependencies] -python-dateutil = ">=2.6,<3.0" -pytzdata = ">=2020.1" - -[[package]] -name = "pendulum" -version = "3.0.0" -description = "Python datetimes made easy" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pendulum-3.0.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2cf9e53ef11668e07f73190c805dbdf07a1939c3298b78d5a9203a86775d1bfd"}, - {file = "pendulum-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fb551b9b5e6059377889d2d878d940fd0bbb80ae4810543db18e6f77b02c5ef6"}, - {file = "pendulum-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c58227ac260d5b01fc1025176d7b31858c9f62595737f350d22124a9a3ad82d"}, - {file = "pendulum-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60fb6f415fea93a11c52578eaa10594568a6716602be8430b167eb0d730f3332"}, - {file = "pendulum-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b69f6b4dbcb86f2c2fe696ba991e67347bcf87fe601362a1aba6431454b46bde"}, - {file = "pendulum-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:138afa9c373ee450ede206db5a5e9004fd3011b3c6bbe1e57015395cd076a09f"}, - {file = "pendulum-3.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:83d9031f39c6da9677164241fd0d37fbfc9dc8ade7043b5d6d62f56e81af8ad2"}, - {file = "pendulum-3.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0c2308af4033fa534f089595bcd40a95a39988ce4059ccd3dc6acb9ef14ca44a"}, - {file = "pendulum-3.0.0-cp310-none-win_amd64.whl", hash = "sha256:9a59637cdb8462bdf2dbcb9d389518c0263799189d773ad5c11db6b13064fa79"}, - {file = "pendulum-3.0.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3725245c0352c95d6ca297193192020d1b0c0f83d5ee6bb09964edc2b5a2d508"}, - {file = "pendulum-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6c035f03a3e565ed132927e2c1b691de0dbf4eb53b02a5a3c5a97e1a64e17bec"}, - {file = "pendulum-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:597e66e63cbd68dd6d58ac46cb7a92363d2088d37ccde2dae4332ef23e95cd00"}, - {file = "pendulum-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99a0f8172e19f3f0c0e4ace0ad1595134d5243cf75985dc2233e8f9e8de263ca"}, - {file = "pendulum-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:77d8839e20f54706aed425bec82a83b4aec74db07f26acd039905d1237a5e1d4"}, - {file = "pendulum-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afde30e8146292b059020fbc8b6f8fd4a60ae7c5e6f0afef937bbb24880bdf01"}, - {file = "pendulum-3.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:660434a6fcf6303c4efd36713ca9212c753140107ee169a3fc6c49c4711c2a05"}, - {file = "pendulum-3.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dee9e5a48c6999dc1106eb7eea3e3a50e98a50651b72c08a87ee2154e544b33e"}, - {file = "pendulum-3.0.0-cp311-none-win_amd64.whl", hash = "sha256:d4cdecde90aec2d67cebe4042fd2a87a4441cc02152ed7ed8fb3ebb110b94ec4"}, - {file = "pendulum-3.0.0-cp311-none-win_arm64.whl", hash = "sha256:773c3bc4ddda2dda9f1b9d51fe06762f9200f3293d75c4660c19b2614b991d83"}, - {file = "pendulum-3.0.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:409e64e41418c49f973d43a28afe5df1df4f1dd87c41c7c90f1a63f61ae0f1f7"}, - {file = "pendulum-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a38ad2121c5ec7c4c190c7334e789c3b4624798859156b138fcc4d92295835dc"}, - {file = "pendulum-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fde4d0b2024b9785f66b7f30ed59281bd60d63d9213cda0eb0910ead777f6d37"}, - {file = "pendulum-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b2c5675769fb6d4c11238132962939b960fcb365436b6d623c5864287faa319"}, - {file = "pendulum-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8af95e03e066826f0f4c65811cbee1b3123d4a45a1c3a2b4fc23c4b0dff893b5"}, - {file = "pendulum-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2165a8f33cb15e06c67070b8afc87a62b85c5a273e3aaa6bc9d15c93a4920d6f"}, - {file = "pendulum-3.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ad5e65b874b5e56bd942546ea7ba9dd1d6a25121db1c517700f1c9de91b28518"}, - {file = "pendulum-3.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17fe4b2c844bbf5f0ece69cfd959fa02957c61317b2161763950d88fed8e13b9"}, - {file = "pendulum-3.0.0-cp312-none-win_amd64.whl", hash = "sha256:78f8f4e7efe5066aca24a7a57511b9c2119f5c2b5eb81c46ff9222ce11e0a7a5"}, - {file = "pendulum-3.0.0-cp312-none-win_arm64.whl", hash = 
"sha256:28f49d8d1e32aae9c284a90b6bb3873eee15ec6e1d9042edd611b22a94ac462f"}, - {file = "pendulum-3.0.0-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d4e2512f4e1a4670284a153b214db9719eb5d14ac55ada5b76cbdb8c5c00399d"}, - {file = "pendulum-3.0.0-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:3d897eb50883cc58d9b92f6405245f84b9286cd2de6e8694cb9ea5cb15195a32"}, - {file = "pendulum-3.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e169cc2ca419517f397811bbe4589cf3cd13fca6dc38bb352ba15ea90739ebb"}, - {file = "pendulum-3.0.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f17c3084a4524ebefd9255513692f7e7360e23c8853dc6f10c64cc184e1217ab"}, - {file = "pendulum-3.0.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:826d6e258052715f64d05ae0fc9040c0151e6a87aae7c109ba9a0ed930ce4000"}, - {file = "pendulum-3.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2aae97087872ef152a0c40e06100b3665d8cb86b59bc8471ca7c26132fccd0f"}, - {file = "pendulum-3.0.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ac65eeec2250d03106b5e81284ad47f0d417ca299a45e89ccc69e36130ca8bc7"}, - {file = "pendulum-3.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a5346d08f3f4a6e9e672187faa179c7bf9227897081d7121866358af369f44f9"}, - {file = "pendulum-3.0.0-cp37-none-win_amd64.whl", hash = "sha256:235d64e87946d8f95c796af34818c76e0f88c94d624c268693c85b723b698aa9"}, - {file = "pendulum-3.0.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:6a881d9c2a7f85bc9adafcfe671df5207f51f5715ae61f5d838b77a1356e8b7b"}, - {file = "pendulum-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d7762d2076b9b1cb718a6631ad6c16c23fc3fac76cbb8c454e81e80be98daa34"}, - {file = "pendulum-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e8e36a8130819d97a479a0e7bf379b66b3b1b520e5dc46bd7eb14634338df8c"}, - {file = "pendulum-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7dc843253ac373358ffc0711960e2dd5b94ab67530a3e204d85c6e8cb2c5fa10"}, - {file = "pendulum-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a78ad3635d609ceb1e97d6aedef6a6a6f93433ddb2312888e668365908c7120"}, - {file = "pendulum-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b30a137e9e0d1f751e60e67d11fc67781a572db76b2296f7b4d44554761049d6"}, - {file = "pendulum-3.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c95984037987f4a457bb760455d9ca80467be792236b69d0084f228a8ada0162"}, - {file = "pendulum-3.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d29c6e578fe0f893766c0d286adbf0b3c726a4e2341eba0917ec79c50274ec16"}, - {file = "pendulum-3.0.0-cp38-none-win_amd64.whl", hash = "sha256:deaba8e16dbfcb3d7a6b5fabdd5a38b7c982809567479987b9c89572df62e027"}, - {file = "pendulum-3.0.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b11aceea5b20b4b5382962b321dbc354af0defe35daa84e9ff3aae3c230df694"}, - {file = "pendulum-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a90d4d504e82ad236afac9adca4d6a19e4865f717034fc69bafb112c320dcc8f"}, - {file = "pendulum-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:825799c6b66e3734227756fa746cc34b3549c48693325b8b9f823cb7d21b19ac"}, - {file = "pendulum-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad769e98dc07972e24afe0cff8d365cb6f0ebc7e65620aa1976fcfbcadc4c6f3"}, - {file = 
"pendulum-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6fc26907eb5fb8cc6188cc620bc2075a6c534d981a2f045daa5f79dfe50d512"}, - {file = "pendulum-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c717eab1b6d898c00a3e0fa7781d615b5c5136bbd40abe82be100bb06df7a56"}, - {file = "pendulum-3.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3ddd1d66d1a714ce43acfe337190be055cdc221d911fc886d5a3aae28e14b76d"}, - {file = "pendulum-3.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:822172853d7a9cf6da95d7b66a16c7160cb99ae6df55d44373888181d7a06edc"}, - {file = "pendulum-3.0.0-cp39-none-win_amd64.whl", hash = "sha256:840de1b49cf1ec54c225a2a6f4f0784d50bd47f68e41dc005b7f67c7d5b5f3ae"}, - {file = "pendulum-3.0.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3b1f74d1e6ffe5d01d6023870e2ce5c2191486928823196f8575dcc786e107b1"}, - {file = "pendulum-3.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:729e9f93756a2cdfa77d0fc82068346e9731c7e884097160603872686e570f07"}, - {file = "pendulum-3.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e586acc0b450cd21cbf0db6bae386237011b75260a3adceddc4be15334689a9a"}, - {file = "pendulum-3.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22e7944ffc1f0099a79ff468ee9630c73f8c7835cd76fdb57ef7320e6a409df4"}, - {file = "pendulum-3.0.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fa30af36bd8e50686846bdace37cf6707bdd044e5cb6e1109acbad3277232e04"}, - {file = "pendulum-3.0.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:440215347b11914ae707981b9a57ab9c7b6983ab0babde07063c6ee75c0dc6e7"}, - {file = "pendulum-3.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:314c4038dc5e6a52991570f50edb2f08c339debdf8cea68ac355b32c4174e820"}, - {file = "pendulum-3.0.0-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5acb1d386337415f74f4d1955c4ce8d0201978c162927d07df8eb0692b2d8533"}, - {file = "pendulum-3.0.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a789e12fbdefaffb7b8ac67f9d8f22ba17a3050ceaaa635cd1cc4645773a4b1e"}, - {file = "pendulum-3.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:860aa9b8a888e5913bd70d819306749e5eb488e6b99cd6c47beb701b22bdecf5"}, - {file = "pendulum-3.0.0-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5ebc65ea033ef0281368217fbf59f5cb05b338ac4dd23d60959c7afcd79a60a0"}, - {file = "pendulum-3.0.0-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9fef18ab0386ef6a9ac7bad7e43ded42c83ff7ad412f950633854f90d59afa8"}, - {file = "pendulum-3.0.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1c134ba2f0571d0b68b83f6972e2307a55a5a849e7dac8505c715c531d2a8795"}, - {file = "pendulum-3.0.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:385680812e7e18af200bb9b4a49777418c32422d05ad5a8eb85144c4a285907b"}, - {file = "pendulum-3.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9eec91cd87c59fb32ec49eb722f375bd58f4be790cae11c1b70fac3ee4f00da0"}, - {file = "pendulum-3.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4386bffeca23c4b69ad50a36211f75b35a4deb6210bdca112ac3043deb7e494a"}, - {file = "pendulum-3.0.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dfbcf1661d7146d7698da4b86e7f04814221081e9fe154183e34f4c5f5fa3bf8"}, - {file = 
"pendulum-3.0.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:04a1094a5aa1daa34a6b57c865b25f691848c61583fb22722a4df5699f6bf74c"}, - {file = "pendulum-3.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5b0ec85b9045bd49dd3a3493a5e7ddfd31c36a2a60da387c419fa04abcaecb23"}, - {file = "pendulum-3.0.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:0a15b90129765b705eb2039062a6daf4d22c4e28d1a54fa260892e8c3ae6e157"}, - {file = "pendulum-3.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:bb8f6d7acd67a67d6fedd361ad2958ff0539445ef51cbe8cd288db4306503cd0"}, - {file = "pendulum-3.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd69b15374bef7e4b4440612915315cc42e8575fcda2a3d7586a0d88192d0c88"}, - {file = "pendulum-3.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc00f8110db6898360c53c812872662e077eaf9c75515d53ecc65d886eec209a"}, - {file = "pendulum-3.0.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:83a44e8b40655d0ba565a5c3d1365d27e3e6778ae2a05b69124db9e471255c4a"}, - {file = "pendulum-3.0.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1a3604e9fbc06b788041b2a8b78f75c243021e0f512447806a6d37ee5214905d"}, - {file = "pendulum-3.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:92c307ae7accebd06cbae4729f0ba9fa724df5f7d91a0964b1b972a22baa482b"}, - {file = "pendulum-3.0.0.tar.gz", hash = "sha256:5d034998dea404ec31fae27af6b22cff1708f830a1ed7353be4d1019bb9f584e"}, -] - -[package.dependencies] -python-dateutil = ">=2.6" -tzdata = ">=2020.1" - -[package.extras] -test = ["time-machine (>=2.6.0)"] - [[package]] name = "pillow" version = "10.3.0" @@ -2931,17 +2831,6 @@ files = [ {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, ] -[[package]] -name = "pytzdata" -version = "2020.1" -description = "The Olson timezone database for Python." 
-optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "pytzdata-2020.1-py2.py3-none-any.whl", hash = "sha256:e1e14750bcf95016381e4d472bad004eef710f2d6417240904070b3d6654485f"}, - {file = "pytzdata-2020.1.tar.gz", hash = "sha256:3efa13b335a00a8de1d345ae41ec78dd11c9f8807f522d39850f2dd828681540"}, -] - [[package]] name = "pywavelets" version = "1.4.1" @@ -3014,6 +2903,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -3021,8 +2911,16 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -3039,6 +2937,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -3046,6 +2945,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -4317,9 +4217,9 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.link testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] [extras] -kuberay = ["kubernetes", "pyyaml"] +kuberay = ["dagster-k8s", "kubernetes", "pyyaml"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<3.13" -content-hash = "9ae768ec0503b1419022a71e97a548552d70df683128d580151853c03071c730" +content-hash = "86d6ef5236dd4b745b806042dbc169890de13003d0db1bb7f43566ea3f9a8729" diff --git a/pyproject.toml b/pyproject.toml index 2e82941..486778c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,17 +27,20 @@ license = "Apache-2.0" [tool.poetry.dependencies] python = ">=3.8.1,<3.13" +dagster = ">=1.8.0" pyyaml = ">=4.0.0" -kubernetes = ">=20.0.0" # no idea what's a good lower bound tenacity = ">=8.0.0" -ray = {extras = ["all"], 
version = ">=2.7.0"} -dagster = ">=1.6.0" + +ray = { version = ">=2.7.0", extras = ["all"] } +kubernetes = { version = ">=20.0.0" } +dagster-k8s = { version = ">=0.19.0" } [tool.poetry.extras] kuberay = [ "pyyaml", "kubernetes", "python-client", + "dagster-k8s", ] [tool.poetry.group.dev.dependencies] @@ -101,10 +104,15 @@ exclude = [ "venv", ] [tool.ruff.lint] -extend-select = ["I"] +extend-select = ["I", "TID252", "TID253"] [tool.ruff.lint.isort] known-first-party = ["dagster_ray", "tests"] +[tool.ruff.lint.flake8-tidy-imports] +# Disallow all relative imports. +ban-relative-imports = "all" +banned-module-level-imports = ["ray"] + [tool.pyright] reportPropertyTypeMismatch = true reportImportCycles = true diff --git a/tests/test_kuberay/__init__.py b/tests/test_kuberay/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_kuberay.py b/tests/test_kuberay/test_raycluster.py similarity index 83% rename from tests/test_kuberay.py rename to tests/test_kuberay/test_raycluster.py index 834359d..89e4c09 100644 --- a/tests/test_kuberay.py +++ b/tests/test_kuberay/test_raycluster.py @@ -8,16 +8,16 @@ import pytest import pytest_cases -import ray +import ray # noqa: TID253 from dagster import AssetExecutionContext, RunConfig, asset, materialize_to_memory from pytest_kubernetes.options import ClusterOptions from pytest_kubernetes.providers import AClusterManager, select_provider_manager from dagster_ray import RayResource -from dagster_ray.kuberay import KubeRayAPI, KubeRayCluster, RayClusterConfig, cleanup_kuberay_clusters +from dagster_ray.kuberay import KubeRayCluster, RayClusterClientResource, RayClusterConfig, cleanup_kuberay_clusters +from dagster_ray.kuberay.client import RayClusterClient from dagster_ray.kuberay.configs import DEFAULT_HEAD_GROUP_SPEC, DEFAULT_WORKER_GROUP_SPECS from dagster_ray.kuberay.ops import CleanupKuberayClustersConfig -from dagster_ray.kuberay.ray_cluster_api import RayClusterApi from tests import ROOT_DIR @@ -72,14 +72,14 @@ def dagster_ray_image(): # TODO: it's easy to parametrize over different versions of k8s # but it would take quite some time to test all of them! 
# probably should only do it in CI -KUBERNETES_VERSION = "1.25.3" +KUBERNETES_VERSION = os.getenv("PYTEST_KUBERNETES_VERSION", "1.31.0") -KUBERAY_VERSIONS = os.environ.get("PYTEST_KUBERAY_VERSIONS", "1.1.0").split(",") +KUBERAY_VERSIONS = os.getenv("PYTEST_KUBERAY_VERSIONS", "1.2.0").split(",") @pytest_cases.fixture(scope="session") @pytest.mark.parametrize("kuberay_version", KUBERAY_VERSIONS) -def k8s_with_raycluster( +def k8s_with_kuberay( request, kuberay_helm_repo, dagster_ray_image: str, kuberay_version: str ) -> Generator[AClusterManager, None, None]: k8s = select_provider_manager("minikube")("dagster-ray") @@ -141,7 +141,7 @@ def worker_group_specs(dagster_ray_image: str) -> List[Dict[str, Any]]: @pytest.fixture(scope="session") def ray_cluster_resource( - k8s_with_raycluster: AClusterManager, + k8s_with_kuberay: AClusterManager, dagster_ray_image: str, head_group_spec: Dict[str, Any], worker_group_specs: List[Dict[str, Any]], @@ -152,7 +152,7 @@ def ray_cluster_resource( # have have to first run port-forwarding with minikube # we can only init ray after that skip_init=True, - api=KubeRayAPI(kubeconfig_file=str(k8s_with_raycluster.kubeconfig)), + client=RayClusterClientResource(kubeconfig_file=str(k8s_with_kuberay.kubeconfig)), ray_cluster=RayClusterConfig( image=dagster_ray_image, head_group_spec=head_group_spec, @@ -164,7 +164,7 @@ def ray_cluster_resource( @pytest.fixture(scope="session") def ray_cluster_resource_skip_cleanup( - k8s_with_raycluster: AClusterManager, + k8s_with_kuberay: AClusterManager, dagster_ray_image: str, head_group_spec: Dict[str, Any], worker_group_specs: List[Dict[str, Any]], @@ -176,7 +176,7 @@ def ray_cluster_resource_skip_cleanup( # we can only init ray after that skip_init=True, skip_cleanup=True, - api=KubeRayAPI(kubeconfig_file=str(k8s_with_raycluster.kubeconfig)), + client=RayClusterClientResource(kubeconfig_file=str(k8s_with_kuberay.kubeconfig)), ray_cluster=RayClusterConfig( image=dagster_ray_image, head_group_spec=head_group_spec, @@ -193,7 +193,7 @@ def get_hostname(): def test_kuberay_cluster_resource( ray_cluster_resource: KubeRayCluster, - k8s_with_raycluster: AClusterManager, + k8s_with_kuberay: AClusterManager, ): @asset # testing RayResource type annotation too! 
@@ -203,7 +203,7 @@ def my_asset(context: AssetExecutionContext, ray_cluster: RayResource) -> None: assert isinstance(ray_cluster, KubeRayCluster) - with k8s_with_raycluster.port_forwarding( + with k8s_with_kuberay.port_forwarding( target=f"svc/{ray_cluster.cluster_name}-head-svc", source_port=cast(int, ray_cluster.redis_port), target_port=10001, @@ -219,8 +219,8 @@ def my_asset(context: AssetExecutionContext, ray_cluster: RayResource) -> None: # not in localhost assert ray_cluster.cluster_name in ray.get(get_hostname.remote()) - ray_cluster_description = ray_cluster.api.kuberay.get_ray_cluster( - ray_cluster.cluster_name, k8s_namespace=ray_cluster.namespace + ray_cluster_description = ray_cluster.client.client.get( + ray_cluster.cluster_name, namespace=ray_cluster.namespace ) assert ray_cluster_description["metadata"]["labels"]["dagster.io/run_id"] == context.run_id assert ray_cluster_description["metadata"]["labels"]["dagster.io/cluster"] == ray_cluster.cluster_name @@ -230,14 +230,14 @@ def my_asset(context: AssetExecutionContext, ray_cluster: RayResource) -> None: resources={"ray_cluster": ray_cluster_resource}, ) - kuberay_api = RayClusterApi(config_file=str(k8s_with_raycluster.kubeconfig)) + kuberay_client = RayClusterClient(config_file=str(k8s_with_kuberay.kubeconfig)) # make sure the RayCluster is cleaned up assert ( len( - kuberay_api.list_ray_clusters( - k8s_namespace=ray_cluster_resource.namespace, label_selector=f"dagster.io/run_id={result.run_id}" + kuberay_client.list( + namespace=ray_cluster_resource.namespace, label_selector=f"dagster.io/run_id={result.run_id}" )["items"] ) == 0 @@ -246,7 +246,7 @@ def my_asset(context: AssetExecutionContext, ray_cluster: RayResource) -> None: def test_kuberay_cleanup_job( ray_cluster_resource_skip_cleanup: KubeRayCluster, - k8s_with_raycluster: AClusterManager, + k8s_with_kuberay: AClusterManager, ): @asset def my_asset(ray_cluster: RayResource) -> None: @@ -257,12 +257,12 @@ def my_asset(ray_cluster: RayResource) -> None: resources={"ray_cluster": ray_cluster_resource_skip_cleanup}, ) - kuberay_api = RayClusterApi(config_file=str(k8s_with_raycluster.kubeconfig)) + kuberay_client = RayClusterClient(config_file=str(k8s_with_kuberay.kubeconfig)) assert ( len( - kuberay_api.list_ray_clusters( - k8s_namespace=ray_cluster_resource_skip_cleanup.namespace, + kuberay_client.list( + namespace=ray_cluster_resource_skip_cleanup.namespace, label_selector=f"dagster.io/run_id={result.run_id}", )["items"] ) @@ -271,17 +271,17 @@ def my_asset(ray_cluster: RayResource) -> None: cleanup_kuberay_clusters.execute_in_process( resources={ - "kuberay_api": KubeRayAPI(kubeconfig_file=str(k8s_with_raycluster.kubeconfig)), + "client": RayClusterClientResource(kubeconfig_file=str(k8s_with_kuberay.kubeconfig)), }, run_config=RunConfig( ops={ - "cleanup_kuberay_clusters": CleanupKuberayClustersConfig( + "cleanup_kuberay_rayclusters": CleanupKuberayClustersConfig( namespace=ray_cluster_resource_skip_cleanup.namespace, ) } ), ) - assert not kuberay_api.list_ray_clusters( - k8s_namespace=ray_cluster_resource_skip_cleanup.namespace, label_selector=f"dagster.io/run_id={result.run_id}" + assert not kuberay_client.list( + namespace=ray_cluster_resource_skip_cleanup.namespace, label_selector=f"dagster.io/run_id={result.run_id}" )["items"]
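A minimal usage sketch (not part of this diff) for the new `PipesRayJobSubmissionClientMessageReader` added in `dagster_ray/pipes.py`: it shows one way the reader could be driven directly against a Ray `JobSubmissionClient` from inside an asset via `open_pipes_session`. The cluster address, entrypoint, and script name are placeholders, and this is only an illustration, not the wiring used by the PR itself.

```python
import time

from dagster import AssetExecutionContext, PipesEnvContextInjector, asset, open_pipes_session
from ray.job_submission import JobStatus, JobSubmissionClient

from dagster_ray.pipes import PipesRayJobSubmissionClientMessageReader


@asset
def ray_pipes_asset(context: AssetExecutionContext):
    # Placeholder address: point this at the Ray dashboard / job API of your cluster
    client = JobSubmissionClient("http://localhost:8265")
    reader = PipesRayJobSubmissionClientMessageReader(client)

    with open_pipes_session(
        context=context,
        context_injector=PipesEnvContextInjector(),
        message_reader=reader,
    ) as session:
        job_id = client.submit_job(
            entrypoint="python my_pipes_script.py",  # placeholder entrypoint
            runtime_env={
                "pip": ["dagster-pipes"],
                # pass the Pipes bootstrap payload to the remote process
                "env_vars": session.get_bootstrap_env_vars(),
            },
        )

        # stream logs in a background thread: Dagster messages are extracted,
        # everything else is forwarded to stdout
        reader.tail_job_logs(client=client, job_id=job_id, blocking=False)

        # naive polling until the job reaches a terminal state
        while client.get_job_status(job_id) not in (
            JobStatus.SUCCEEDED,
            JobStatus.FAILED,
            JobStatus.STOPPED,
        ):
            time.sleep(5)

    return session.get_results()
```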