Update notebooks and docs with updated Cluster Configuration args
Ygnas authored and openshift-merge-bot[bot] committed Aug 22, 2024
1 parent ee307a9 commit 95b2165
Showing 13 changed files with 79 additions and 80 deletions.
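The diffs below apply the same argument rename in every notebook and in the docs: `head_gpus` and `num_gpus` become the extended-resource dictionaries `head_extended_resource_requests` and `worker_extended_resource_requests`, while `min_cpus`/`max_cpus`/`min_memory`/`max_memory` become `worker_cpu_requests`/`worker_cpu_limits`/`worker_memory_requests`/`worker_memory_limits`. A minimal sketch of the updated call follows; the values are illustrative and the import path is assumed from the demo notebooks, so it may differ by SDK version.

```python
# Sketch of the renamed ClusterConfiguration arguments (illustrative values).
# Import path assumed from the demo notebooks; it may differ by SDK version.
from codeflare_sdk import Cluster, ClusterConfiguration

cluster = Cluster(ClusterConfiguration(
    name="raytest",
    head_extended_resource_requests={"nvidia.com/gpu": 0},    # was: head_gpus=0
    worker_extended_resource_requests={"nvidia.com/gpu": 0},  # was: num_gpus=0
    num_workers=2,
    worker_cpu_requests=1,     # was: min_cpus=1
    worker_cpu_limits=1,       # was: max_cpus=1
    worker_memory_requests=4,  # was: min_memory=4
    worker_memory_limits=4,    # was: max_memory=4
    write_to_file=False,       # when True, Ray Cluster YAML is written to ~/.codeflare/resources
))
```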
12 changes: 6 additions & 6 deletions demo-notebooks/additional-demos/hf_interactive.ipynb
@@ -91,13 +91,13 @@
"# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n",
"cluster_name= \"hfgputest\"\n",
"cluster = Cluster(ClusterConfiguration(name=cluster_name, \n",
" head_gpus=1, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=1,\n",
" head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':1},\n",
" num_workers=1,\n",
" min_cpus=8, \n",
" max_cpus=8, \n",
" min_memory=16, \n",
" max_memory=16, \n",
" worker_cpu_requests=8, \n",
" worker_cpu_limits=8, \n",
" worker_memory_requests=16, \n",
" worker_memory_limits=16, \n",
" # image=\"\", # Optional Field \n",
" write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
12 changes: 6 additions & 6 deletions demo-notebooks/additional-demos/local_interactive.ipynb
@@ -54,13 +54,13 @@
"\n",
"cluster = Cluster(ClusterConfiguration(\n",
" name=cluster_name,\n",
" head_gpus=0, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=0,\n",
" head_extended_resource_requests={'nvidia.com/gpu':0}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':0},\n",
" num_workers=1,\n",
" min_cpus=1,\n",
" max_cpus=1,\n",
" min_memory=4,\n",
" max_memory=4,\n",
" worker_cpu_requests=1,\n",
" worker_cpu_limits=1,\n",
" worker_memory_requests=4,\n",
" worker_memory_limits=4,\n",
" # image=\"\", # Optional Field \n",
" write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
12 changes: 6 additions & 6 deletions demo-notebooks/additional-demos/ray_job_client.ipynb
@@ -55,13 +55,13 @@
"# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n",
"cluster = Cluster(ClusterConfiguration(\n",
" name='jobtest',\n",
" head_gpus=0, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=0,\n",
" head_extended_resource_requests={'nvidia.com/gpu':0}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':0},\n",
" num_workers=2,\n",
" min_cpus=1,\n",
" max_cpus=1,\n",
" min_memory=4,\n",
" max_memory=4,\n",
" worker_cpu_requests=1,\n",
" worker_cpu_limits=1,\n",
" worker_memory_requests=4,\n",
" worker_memory_limits=4,\n",
" # image=\"\", # Optional Field \n",
" write_to_file=False # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
12 changes: 6 additions & 6 deletions demo-notebooks/guided-demos/0_basic_ray.ipynb
@@ -64,13 +64,13 @@
" name='raytest', \n",
" head_cpus='500m',\n",
" head_memory=2,\n",
" head_gpus=0, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=0,\n",
" head_extended_resource_requests={'nvidia.com/gpu':0}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':0},\n",
" num_workers=2,\n",
" min_cpus='250m',\n",
" max_cpus=1,\n",
" min_memory=4,\n",
" max_memory=4,\n",
" worker_cpu_requests='250m',\n",
" worker_cpu_limits=1,\n",
" worker_memory_requests=4,\n",
" worker_memory_limits=4,\n",
" # image=\"\", # Optional Field \n",
" write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
12 changes: 6 additions & 6 deletions demo-notebooks/guided-demos/1_cluster_job_client.ipynb
@@ -57,13 +57,13 @@
" name='jobtest',\n",
" head_cpus=1,\n",
" head_memory=4,\n",
" head_gpus=1, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=1,\n",
" head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':1},\n",
" num_workers=2,\n",
" min_cpus='250m',\n",
" max_cpus=1,\n",
" min_memory=4,\n",
" max_memory=4,\n",
" worker_cpu_requests='250m',\n",
" worker_cpu_limits=1,\n",
" worker_memory_requests=4,\n",
" worker_memory_limits=4,\n",
" # image=\"\", # Optional Field \n",
" write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
12 changes: 6 additions & 6 deletions demo-notebooks/guided-demos/2_basic_interactive.ipynb
@@ -62,13 +62,13 @@
" name=cluster_name,\n",
" head_cpus=1,\n",
" head_memory=6,\n",
" head_gpus=1, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=1,\n",
" head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':1},\n",
" num_workers=2,\n",
" min_cpus='250m',\n",
" max_cpus=1,\n",
" min_memory=4,\n",
" max_memory=6,\n",
" worker_cpu_requests='250m',\n",
" worker_cpu_limits=1,\n",
" worker_memory_requests=4,\n",
" worker_memory_limits=6,\n",
" # image=\"\", # Optional Field \n",
" write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
@@ -70,13 +70,13 @@
"# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n",
"cluster = Cluster(ClusterConfiguration(\n",
" name='raytest',\n",
" head_gpus=0, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=0,\n",
" head_extended_resource_requests={'nvidia.com/gpu':0}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':0},\n",
" num_workers=2,\n",
" min_cpus=1,\n",
" max_cpus=1,\n",
" min_memory=4,\n",
" max_memory=4,\n",
" worker_cpu_requests=1,\n",
" worker_cpu_limits=1,\n",
" worker_memory_requests=4,\n",
" worker_memory_limits=4,\n",
" # image=\"\", # Optional Field \n",
" write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
@@ -55,13 +55,13 @@
"# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n",
"cluster = Cluster(ClusterConfiguration(\n",
" name='jobtest',\n",
" head_gpus=1, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=1,\n",
" head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':1},\n",
" num_workers=2,\n",
" min_cpus=1,\n",
" max_cpus=1,\n",
" min_memory=4,\n",
" max_memory=4,\n",
" worker_cpu_requests=1,\n",
" worker_cpu_limits=1,\n",
" worker_memory_requests=4,\n",
" worker_memory_limits=4,\n",
" # image=\"\", # Optional Field \n",
" write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
@@ -68,13 +68,13 @@
"cluster_name = \"interactivetest\"\n",
"cluster = Cluster(ClusterConfiguration(\n",
" name=cluster_name,\n",
" head_gpus=1, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=1,\n",
" head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':1},\n",
" num_workers=2,\n",
" min_cpus=2,\n",
" max_cpus=2,\n",
" min_memory=8,\n",
" max_memory=8,\n",
" worker_cpu_requests=2,\n",
" worker_cpu_limits=2,\n",
" worker_memory_requests=8,\n",
" worker_memory_limits=8,\n",
" # image=\"\", # Optional Field \n",
" write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
12 changes: 6 additions & 6 deletions demo-notebooks/guided-demos/preview_nbs/0_basic_ray.ipynb
@@ -62,13 +62,13 @@
"# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n",
"cluster = Cluster(ClusterConfiguration(\n",
" name='raytest',\n",
" head_gpus=0, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=0,\n",
" head_extended_resource_requests={'nvidia.com/gpu':0}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':0},\n",
" num_workers=2,\n",
" min_cpus=1,\n",
" max_cpus=1,\n",
" min_memory=4,\n",
" max_memory=4,\n",
" worker_cpu_requests=1,\n",
" worker_cpu_limits=1,\n",
" worker_memory_requests=4,\n",
" worker_memory_limits=4,\n",
" # image=\"\", # Optional Field \n",
" write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
@@ -55,13 +55,13 @@
"# The SDK will try to find the name of your default local queue based on the annotation \"kueue.x-k8s.io/default-queue\": \"true\" unless you specify the local queue manually below\n",
"cluster = Cluster(ClusterConfiguration(\n",
" name='jobtest',\n",
" head_gpus=1, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=1,\n",
" head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':1},\n",
" num_workers=2,\n",
" min_cpus=1,\n",
" max_cpus=1,\n",
" min_memory=4,\n",
" max_memory=4,\n",
" worker_cpu_requests=1,\n",
" worker_cpu_limits=1,\n",
" worker_memory_requests=4,\n",
" worker_memory_limits=4,\n",
" # image=\"\", # Optional Field \n",
" write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources\n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
@@ -60,13 +60,13 @@
"cluster_name = \"interactivetest\"\n",
"cluster = Cluster(ClusterConfiguration(\n",
" name=cluster_name,\n",
" head_gpus=1, # For GPU enabled workloads set the head_gpus and num_gpus\n",
" num_gpus=1,\n",
" head_extended_resource_requests={'nvidia.com/gpu':1}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n",
" worker_extended_resource_requests={'nvidia.com/gpu':1},\n",
" num_workers=2,\n",
" min_cpus=2,\n",
" max_cpus=2,\n",
" min_memory=8,\n",
" max_memory=8,\n",
" worker_cpu_requests=2,\n",
" worker_cpu_limits=2,\n",
" worker_memory_requests=8,\n",
" worker_memory_limits=8,\n",
" # image=\"\", # Optional Field \n",
" write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources \n",
" # local_queue=\"local-queue-name\" # Specify the local queue manually\n",
15 changes: 7 additions & 8 deletions docs/cluster-configuration.md
@@ -11,14 +11,13 @@ cluster = Cluster(ClusterConfiguration(
namespace='default', # Default None
head_cpus=1, # Default 2
head_memory=1, # Default 8
head_gpus=0, # Default 0
num_gpus=0, # Default 0
head_extended_resource_requests={'nvidia.com/gpu':0}, # Default 0
worker_extended_resource_requests={'nvidia.com/gpu':0}, # Default 0
num_workers=1, # Default 1
min_cpus=1, # Default 1
max_cpus=1, # Default 1
min_memory=2, # Default 2
max_memory=2, # Default 2
num_gpus=0, # Default 0
worker_cpu_requests=1, # Default 1
worker_cpu_limits=1, # Default 1
worker_memory_requests=2, # Default 2
worker_memory_limits=2, # Default 2
# image="", # Optional Field
machine_types=["m5.xlarge", "g4dn.xlarge"],
labels={"exampleLabel": "example", "secondLabel": "example"},
@@ -28,4 +27,4 @@ Note: 'quay.io/rhoai/ray:2.23.0-py39-cu121' is the default community image used

The `labels={"exampleLabel": "example"}` parameter can be used to apply additional labels to the RayCluster resource.

After creating their`cluster`, a user can call `cluster.up()` and `cluster.down()` to respectively create or remove the Ray Cluster.
After creating their `cluster`, a user can call `cluster.up()` and `cluster.down()` to respectively create or remove the Ray Cluster.
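As the updated docs sentence above notes, only the configuration arguments changed; creating and removing the Ray Cluster works as before. A brief sketch, assuming the `cluster` object from the configuration example earlier:

```python
cluster.up()    # create the Ray Cluster described by the ClusterConfiguration
# ... run workloads against the cluster ...
cluster.down()  # remove the Ray Cluster when finished
```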
