From 5f3c6ae40205168d14103c3cc45938756a0f70c6 Mon Sep 17 00:00:00 2001 From: Bill Arnold Date: Thu, 10 Aug 2023 18:17:14 -0400 Subject: [PATCH 1/5] Updates for new release of Cerebras software 1.9.1 --- .../cerebras/customizing-environment.md | 32 ++++++---- docs/ai-testbed/cerebras/example-programs.md | 48 +++++++------- .../cerebras/job-queuing-and-submission.md | 37 +++++------ .../cerebras/running-a-model-or-program.md | 63 +++++-------------- mkdocs.yml | 2 +- 5 files changed, 76 insertions(+), 106 deletions(-) diff --git a/docs/ai-testbed/cerebras/customizing-environment.md b/docs/ai-testbed/cerebras/customizing-environment.md index ae70a0a58..eec6ac8f7 100644 --- a/docs/ai-testbed/cerebras/customizing-environment.md +++ b/docs/ai-testbed/cerebras/customizing-environment.md @@ -5,29 +5,37 @@ #### To make a PyTorch virtual environment for Cerebras ```console -mkdir ~/R_1.8.0 -cd ~/R_1.8.0 +#Make your home directory navigable +chmod a+xr ~/ +mkdir ~/R_1.9.1 +chmod a+x ~/R_1.9.1/ +cd ~/R_1.9.1 # Note: "deactivate" does not actually work in scripts. deactivate rm -r venv_pt -/software/cerebras/python3.7/bin/python3.7 -m venv venv_pt +/software/cerebras/python3.8/bin/python3.8 -m venv venv_pt source venv_pt/bin/activate -pip3 install --disable-pip-version-check /opt/cerebras/wheels/cerebras_pytorch-1.8.0+de49801ca3-py3-none-any.whl --find-links=/opt/cerebras/wheels/ +pip3 install /opt/cerebras/wheels/cerebras_pytorch-1.9.1+1cf4d0632b-cp38-cp38-linux_x86_64.whl --find-links=/opt/cerebras/wheels +pip install numpy==1.23.4 +pip install datasets transformers ``` #### To make a TensorFlow virtual environment for Cerebras ```console -mkdir ~/R_1.8.0 -cd ~/R_1.8.0 +chmod a+xr ~/ +mkdir ~/R_1.9.1 +chmod a+x ~/R_1.9.1/ +cd ~/R_1.9.1 # Note: "deactivate" does not actually work in scripts. deactivate rm -r venv_tf -/software/cerebras/python3.7/bin/python3.7 -m venv venv_tf +/software/cerebras/python3.8/bin/python3.8 -m venv venv_tf source venv_tf/bin/activate -pip install tensorflow_datasets -pip install spacy -pip3 install --disable-pip-version-check /opt/cerebras/wheels/cerebras_tensorflow-1.8.0+de49801ca3-py3-none-any.whl --find-links=/opt/cerebras/wheels/ +#pip install tensorflow_datasets +#pip install spacy +pip3 install /opt/cerebras/wheels/cerebras_tensorflow-1.9.1+1cf4d0632b-cp38-cp38-linux_x86_64.whl --find-links=/opt/cerebras/wheels/ +pip install numpy==1.23.4 ``` #### Activation and deactivation @@ -35,13 +43,13 @@ pip3 install --disable-pip-version-check /opt/cerebras/wheels/cerebras_tensorflo To activate one of these virtual environments, ```console -source ~/R_1.8.0/venv_pt/bin/activate +source ~/R_1.9.1/venv_pt/bin/activate ``` or ```console -source ~/R_1.8.0/venv_tf/bin/activate +source ~/R_1.9.1/venv_tf/bin/activate ``` To deactivate a virtual environment, diff --git a/docs/ai-testbed/cerebras/example-programs.md b/docs/ai-testbed/cerebras/example-programs.md index be07e5aa6..d4ae05555 100644 --- a/docs/ai-testbed/cerebras/example-programs.md +++ b/docs/ai-testbed/cerebras/example-programs.md @@ -4,12 +4,12 @@ Make a working directory and a local copy of the Cerebras **modelzoo** and **anl_shared** repository, if not previously done, as follows. 
```bash -mkdir ~/R_1.8.0 -cd ~/R_1.8.0 +mkdir ~/R_1.9.1 +cd ~/R_1.9.1 git clone https://github.com/Cerebras/modelzoo.git ``` ## UNet @@ -19,17 +19,17 @@ To run Unet with the ```console TODO -cd ~/R_1.8.0/anl_shared/braggnn/tf +cd ~/R_1.9.1/anl_shared/braggnn/tf # This yaml has a correct path to a BraggNN dataset cp /software/cerebras/dataset/BraggN/params_bragg_nonlocal_sampleds.yaml configs/params_bragg_nonlocal_sampleds.yaml export MODEL_DIR=model_dir_braggnn @@ -63,17 +63,17 @@ source /software/cerebras/venvs/venv_pt/bin/activate # or your personal venv ---> ```console -source ~/R_1.8.0/venv_pt/bin/activate +source ~/R_1.9.1/venv_pt/bin/activate ``` Then ```console -cd ~/R_1.8.0/modelzoo/modelzoo/transformers/pytorch/bert +cd ~/R_1.9.1/modelzoo/modelzoo/transformers/pytorch/bert cp /software/cerebras/dataset/bert_large/bert_large_MSL128_sampleds.yaml configs/bert_large_MSL128_sampleds.yaml export MODEL_DIR=model_dir_bert_large_pytorch if [ -d "$MODEL_DIR" ]; then rm -Rf $MODEL_DIR; fi -python run.py CSX pipeline --job_labels name=bert_pt --params configs/bert_large_MSL128_sampleds.yaml --num_workers_per_csx=1 --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software/ --python_paths /home/$(whoami)/R_1.8.0/modelzoo/ --compile_dir $(whoami) |& tee mytest.log +python run.py CSX --job_labels name=bert_pt --params configs/bert_large_MSL128_sampleds.yaml --num_workers_per_csx=1 --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software/ --python_paths /home/$(whoami)/R_1.9.1/modelzoo/ --compile_dir $(whoami) |& tee mytest.log ``` The last parts of the output should resemble the following, with messages about cuda that should be ignored and are not shown. @@ -97,27 +97,24 @@ The last parts of the output should resemble the following, with messages about 2023-05-17 18:18:49,293 INFO: Monitoring returned ``` +<~--- No longer part of the modelzoo ## BERT - TensorFlow The modelzoo/modelzoo/transformers/tf/bert directory is a TensorFlow implementation of [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805)
This BERT-large msl128 example uses a single sample dataset for both training and evaluation. See the README.md in the source directory for details on how to build a dataset from text input. First, source a Cerebras TensorFlow virtual environment. - ```console -source ~/R_1.8.0/venv_tf/bin/activate +source ~/R_1.9.1/venv_tf/bin/activate ``` Then ```console -cd ~/R_1.8.0/modelzoo/modelzoo/transformers/tf/bert +cd ~/R_1.9.1/modelzoo/modelzoo/transformers/tf/bert cp /software/cerebras/dataset/bert_large/params_bert_large_msl128_sampleds.yaml configs/params_bert_large_msl128_sampleds.yaml export MODEL_DIR=mytest if [ -d "$MODEL_DIR" ]; then rm -Rf $MODEL_DIR; fi -python run.py CSX pipeline --job_labels name=bert_tf --max_steps 1000 --params configs/params_bert_large_msl128_sampleds.yaml --num_workers_per_csx=1 --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software/ --python_paths /home/$(whoami)/R_1.8.0/modelzoo/ --compile_dir $(whoami) |& tee mytest.log +python run.py CSX --job_labels name=bert_tf --max_steps 1000 --params configs/params_bert_large_msl128_sampleds.yaml --num_workers_per_csx=1 --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software/ --python_paths /home/$(whoami)/R_1.9.1/modelzoo/ --compile_dir $(whoami) |& tee mytest.log ``` The last parts of the output should resemble the following, with messages about cuda that should be ignored and are not shown. @@ -140,6 +137,7 @@ INFO:root:Taking final checkpoint at step: 1000 INFO:tensorflow:Saved checkpoint for global step 1000 in 67.17758774757385 seconds: mytest/model.ckpt-1000 INFO:root:Monitoring returned ``` +---> ## GPT-J PyTorch @@ -148,22 +146,18 @@ This PyTorch GPT-J 6B parameter pretraining sample uses 2 CS2s. First, source a Cerebras PyTorch virtual environment. 
- ```console -source ~/R_1.8.0/venv_pt/bin/activate +source ~/R_1.9.1/venv_pt/bin/activate ``` Then ```console -cd ~/R_1.8.0/modelzoo/modelzoo/transformers/pytorch/gptj +cd ~/R_1.9.1/modelzoo/modelzoo/transformers/pytorch/gptj cp /software/cerebras/dataset/gptj/params_gptj_6B_sampleds.yaml configs/params_gptj_6B_sampleds.yaml export MODEL_DIR=model_dir_gptj if [ -d "$MODEL_DIR" ]; then rm -Rf $MODEL_DIR; fi -python run.py CSX weight_streaming --job_labels name=gptj_pt --params configs/params_gptj_6B_sampleds.yaml --num_csx=2 --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software --python_paths /home/$(whoami)/R_1.8.0/modelzoo/ --compile_dir $(whoami) |& tee mytest.log +python run.py CSX --job_labels name=gptj_pt --params configs/params_gptj_6B_sampleds.yaml --num_csx=2 --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software --python_paths /home/$(whoami)/R_1.9.1/modelzoo/ --compile_dir $(whoami) |& tee mytest.log ``` The last parts of the output should resemble the following: @@ -193,17 +187,17 @@ source /software/cerebras/venvs/venv_tf/bin/activate # or your personal venv ---> ```console -source ~/R_1.8.0/venv_tf/bin/activate +source ~/R_1.9.1/venv_tf/bin/activate ``` Then ```console -cd ~/R_1.8.0/modelzoo/modelzoo/transformers/tf/gptj +cd ~/R_1.9.1/modelzoo/modelzoo/transformers/tf/gptj cp /software/cerebras/dataset/gptj/params_gptj_6B_tf_sampleds.yaml configs/params_gptj_6B_sampleds.yaml export MODEL_DIR=model_dir_gptj_tf if [ -d "$MODEL_DIR" ]; then rm -Rf $MODEL_DIR; fi -python run.py CSX weight_streaming --job_labels name=gptj_tf --max_steps 500 --params configs/params_gptj_6B_sampleds.yaml --num_csx=2 --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software/ --python_paths /home/$(whoami)/R_1.8.0/modelzoo/ --compile_dir $(whoami) |& tee mytest.log +python run.py CSX --job_labels name=gptj_tf --max_steps 500 --params configs/params_gptj_6B_sampleds.yaml --num_csx=2 --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software/ --python_paths /home/$(whoami)/R_1.9.1/modelzoo/ --compile_dir $(whoami) |& tee mytest.log ``` The last parts of the output should resemble the following: diff --git a/docs/ai-testbed/cerebras/job-queuing-and-submission.md b/docs/ai-testbed/cerebras/job-queuing-and-submission.md index 17c99ee0d..7e9839475 100644 --- a/docs/ai-testbed/cerebras/job-queuing-and-submission.md +++ b/docs/ai-testbed/cerebras/job-queuing-and-submission.md @@ -5,18 +5,13 @@ The CS-2 cluster has its own **Kubernetes-based** system for job submission and Jobs are started automatically through the **Python** frameworks in modelzoo.common.pytorch.run_utils and modelzoo.common.tf.run_utils Continuous job status for a job is output to stdout/stderr; redirect the output, or consider using a persistent session started with **screen**, or **tmux**, or both. -In order to run the Cerebras csctl utility you will need to copy a config file to your home directory. Future versions of Cerebras software will reference a system wide file. -```console -mkdir ~/.cs; cp /opt/cerebras/config ~/.cs/config -``` - Jobs that have not yet completed can be listed as shown. Note: this command can take over a minute to complete. 
```console -(venv_tf) $ csctl get jobs | grep -v "SUCCEEDED\|FAILED\|CANCELLED" -NAME AGE PHASE SYSTEMS USER LABELS -wsjob-eyjapwgnycahq9tus4w7id 88s RUNNING cer-cs2-01 username name=pt_smoketest,user=username -(venv_tf) $ +(venv_pt) $ csctl get jobs" +NAME AGE DURATION PHASE SYSTEMS USER LABELS DASHBOARD +wsjob-thjj8zticwsylhppkbmjqe 13s 1s RUNNING cer-cs2-01 username name=unet_pt https://grafana.cerebras1.lab.alcf.anl.gov/d/WebHNShVz/wsjob-dashboard?orgId=1&var-wsjob=wsjob-thjj8zticwsylhppkbmjqe&from=1691705374000&to=now +(venv_pt) $ ``` Jobs can be canceled as shown: @@ -46,7 +41,8 @@ wsjob-ez6dyfronnsg2rz7f7fqw4 19m SUCCEEDED cer-cs2-02 username testlabel=test, (venv_pt) $ ``` -See `csctl -h` for more options +See `csctl -h` for more options.
+Add `-h` to a command for help for that command, e.g. `csctl get -h` or `csctl cancel -h`. ```console $ csctl -h @@ -56,18 +52,19 @@ Usage: csctl [command] Available Commands: - cancel Cancel job - config Modify csctl config files - get Get resources - label Label resources - log-export Gather and download logs. - types Display resource types + cancel Cancel job + clear-worker-cache Clear the worker cache + config View csctl config files + get Get resources + label Label resources + log-export Gather and download logs. + types Display resource types Flags: - --csconfig string config file (default is $HOME/.cs/config) (default "$HOME/.cs/config") - -d, --debug int higher debug values will display more fields in output objects - -h, --help help for csctl + -d, --debug int higher debug values will display more fields in output objects + -h, --help help for csctl + --namespace string configure csctl to talk to different user namespaces + -v, --version version for csctl Use "csctl [command] --help" for more information about a command. - ``` diff --git a/docs/ai-testbed/cerebras/running-a-model-or-program.md b/docs/ai-testbed/cerebras/running-a-model-or-program.md index 912afa2c1..183c9499d 100644 --- a/docs/ai-testbed/cerebras/running-a-model-or-program.md +++ b/docs/ai-testbed/cerebras/running-a-model-or-program.md @@ -17,63 +17,40 @@ man screen man tmux ``` -#### Execution mode: - -The CS-2 system supports two modes of execution.
-1. Pipeline mode.
-This mode is used for smaller models (fewer than 1 billion parameters).
-2. Weight streaming mode.
-Weight streaming mode uses the host memory of the Cerebras cluster's MemoryX nodes to store and broadcast model weights, and supports larger models compared to pipelined mode.
- ## Running jobs on the wafer Follow these instructions to compile and train the `fc_mnist` TensorFlow and PyTorch samples. These models are a couple of fully connected layers plus dropout and RELU.
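+
+As a rough sketch only (hypothetical layer sizes, not the modelzoo implementation; the sample's actual configuration comes from the configs/params.yaml edited below), a model of that general shape looks like this in PyTorch:
+
+```python
+import torch.nn as nn
+
+# Illustrative sketch of a small fully connected MNIST classifier:
+# Flatten -> Linear -> ReLU -> Dropout -> Linear. Sizes are made up.
+class TinyFCMNIST(nn.Module):
+    def __init__(self, hidden_size=256, dropout_rate=0.5):
+        super().__init__()
+        self.layers = nn.Sequential(
+            nn.Flatten(),                      # 28x28 image -> 784 features
+            nn.Linear(28 * 28, hidden_size),   # first fully connected layer
+            nn.ReLU(),
+            nn.Dropout(dropout_rate),
+            nn.Linear(hidden_size, 10),        # 10 digit classes
+        )
+
+    def forward(self, x):
+        return self.layers(x)
+```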
### Cerebras virtual environments - First, make virtual environments for Cerebras for PyTorch and/or TensorFlow. -See [Customizing Environments](./customizing-environment.md) for the procedures for making custom PyTorch and/or TensorFlow virtual environments for Cerebras. -If the environments are made in ```~/R_1.8.0/```, then they would be activated as follows: +See [Customizing Environments](./customizing-environment.md) for the procedures for making PyTorch and/or TensorFlow virtual environments for Cerebras. +If the environments are made in ```~/R_1.9.1/```, then they would be activated as follows: ```console -source ~/R_1.8.0/venv_pt/bin/activate +source ~/R_1.9.1/venv_pt/bin/activate ``` or ```console -source ~/R_1.8.0/vent_tf/bin/activate +source ~/R_1.9.1/vent_tf/bin/activate ``` ### Clone the Cerebras modelzoo ```console -mkdir ~/R_1.8.0 -cd ~/R_1.8.0 +mkdir ~/R_1.9.1 +cd ~/R_1.9.1 git clone https://github.com/Cerebras/modelzoo.git cd modelzoo git tag -git checkout Release_1.8.0 +git checkout Release_1.9.1 ``` ## Running a Pytorch sample ### Activate your PyTorch virtual environment, and change to the working directory ```console -source ~/R_1.8.0/venv_pt/bin/activate -cd ~/R_1.8.0/modelzoo/modelzoo/fc_mnist/pytorch +source ~/R_1.9.1/venv_pt/bin/activate +cd ~/R_1.9.1/modelzoo/modelzoo/fc_mnist/pytorch ``` Next, edit configs/params.yaml, making the following changes: @@ -89,10 +66,10 @@ and ```text eval_input: - data_dir: "./data/mnist/val" -+ data_dir: "/software/cerebras/dataset/fc_mnist/data/mnist/val" ++ data_dir: "/software/cerebras/dataset/fc_mnist/data/mnist/train" ``` -If you want to have the sample download the dataset, you will need to specify absolute paths for the "data_dir"s +If you want to have the sample download the dataset, you will need to specify absolute paths for the "data_dir"s. ### Running a sample PyTorch training job @@ -102,7 +79,7 @@ To run the sample: export MODEL_DIR=model_dir # deletion of the model_dir is only needed if sample has been previously run if [ -d "$MODEL_DIR" ]; then rm -Rf $MODEL_DIR; fi -python run.py CSX pipeline --job_labels name=pt_smoketest --params configs/params.yaml --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software --python_paths /home/$(whoami)/R_1.8.0/modelzoo --compile_dir /$(whoami) |& tee mytest.log +python run.py CSX --job_labels name=pt_smoketest --params configs/params.yaml --num_csx=1 --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software --python_paths /home/$(whoami)/R_1.9.1/modelzoo --compile_dir /$(whoami) |& tee mytest.log ``` A successful fc_mnist PyTorch training run should finish with output resembling the following: @@ -117,22 +94,15 @@ A successful fc_mnist PyTorch training run should finish with output resembling 2023-05-15 16:06:04,356 INFO: Monitoring returned ``` + Next, edit configs/params.yaml, making the following change. Cerebras requires that the data_dir be an absolute path. @@ -155,7 +125,7 @@ Next, edit configs/params.yaml, making the following change. 
Cerebras requires t export MODEL_DIR=model_dir # deletion of the model_dir is only needed if sample has been previously run if [ -d "$MODEL_DIR" ]; then rm -Rf $MODEL_DIR; fi -python run.py CSX pipeline --job_labels name=tf_fc_mnist --params configs/params.yaml --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software/ --python_paths /home/$(whoami)/R_1.8.0/modelzoo/ --compile_dir /$(whoami) |& tee mytest.log +python run.py CSX pipeline --job_labels name=tf_fc_mnist --params configs/params.yaml --mode train --model_dir $MODEL_DIR --mount_dirs /home/ /software/ --python_paths /home/$(whoami)/R_1.9.1/modelzoo/ --compile_dir /$(whoami) |& tee mytest.log ``` A successful fc_mnist TensorFlow training run should finish with output resembling the following: @@ -169,4 +139,5 @@ INFO:root:Saving step 99999 in dataloader checkpoint INFO:tensorflow:Saved checkpoint for global step 100000 in 3.9300642013549805 seconds: model_dir/model.ckpt-100000 INFO:root:Monitoring returned ``` +---> diff --git a/mkdocs.yml b/mkdocs.yml index 90a6f2ed0..c69e9eb57 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -196,9 +196,9 @@ nav: - System Overview: ai-testbed/cerebras/system-overview.md - Getting Started: ai-testbed/cerebras/getting-started.md - Running a Model/Program: ai-testbed/cerebras/running-a-model-or-program.md + - Customizing Environments: ai-testbed/cerebras/customizing-environment.md - Job Queuing and Submission: ai-testbed/cerebras/job-queuing-and-submission.md - Example Programs: ai-testbed/cerebras/example-programs.md - - Customizing Environments: ai-testbed/cerebras/customizing-environment.md #- Performance Tools: ai-testbed/cerebras/performance-tools.md - Tunneling and Forwarding Ports: ai-testbed/cerebras/tunneling-and-forwarding-ports.md - Miscellaneous: ai-testbed/cerebras/miscellaneous.md From 3a076eac25607705bd63597c98d251be7fecb1d8 Mon Sep 17 00:00:00 2001 From: Varuni Sastry <88804132+vksastry@users.noreply.github.com> Date: Fri, 11 Aug 2023 12:40:43 -0500 Subject: [PATCH 2/5] Update example-programs.md fixed typo to exclude tensorflow Bert --- docs/ai-testbed/cerebras/example-programs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ai-testbed/cerebras/example-programs.md b/docs/ai-testbed/cerebras/example-programs.md index d4ae05555..f99527c74 100644 --- a/docs/ai-testbed/cerebras/example-programs.md +++ b/docs/ai-testbed/cerebras/example-programs.md @@ -97,7 +97,7 @@ The last parts of the output should resemble the following, with messages about 2023-05-17 18:18:49,293 INFO: Monitoring returned ``` -<~--- No longer part of the modelzoo + + ```console source ~/R_1.9.1/venv_tf/bin/activate ``` @@ -211,3 +211,4 @@ INFO:root:Taking final checkpoint at step: 500 INFO:tensorflow:Saved checkpoint for global step 500 in 304.37238907814026 seconds: model_dir_gptj_tf/model.ckpt-500 INFO:root:Monitoring is over without any issue ``` +---> From 1bbfe8e6a72eba26a423b6e68d038411a869603a Mon Sep 17 00:00:00 2001 From: Varuni Sastry <88804132+vksastry@users.noreply.github.com> Date: Fri, 11 Aug 2023 12:48:11 -0500 Subject: [PATCH 4/5] Update job-queuing-and-submission.md --- docs/ai-testbed/cerebras/job-queuing-and-submission.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ai-testbed/cerebras/job-queuing-and-submission.md b/docs/ai-testbed/cerebras/job-queuing-and-submission.md index 7e9839475..efafe454d 100644 --- a/docs/ai-testbed/cerebras/job-queuing-and-submission.md +++ b/docs/ai-testbed/cerebras/job-queuing-and-submission.md @@ -8,7 +8,7 @@ 
Continuous job status for a job is output to stdout/stderr; redirect the output, Jobs that have not yet completed can be listed as shown. Note: this command can take over a minute to complete. ```console -(venv_pt) $ csctl get jobs" +(venv_pt) $ csctl get jobs NAME AGE DURATION PHASE SYSTEMS USER LABELS DASHBOARD wsjob-thjj8zticwsylhppkbmjqe 13s 1s RUNNING cer-cs2-01 username name=unet_pt https://grafana.cerebras1.lab.alcf.anl.gov/d/WebHNShVz/wsjob-dashboard?orgId=1&var-wsjob=wsjob-thjj8zticwsylhppkbmjqe&from=1691705374000&to=now (venv_pt) $ From f79516a708ebb0d915a443288e3c931b32ac1ba6 Mon Sep 17 00:00:00 2001 From: Varuni Sastry <88804132+vksastry@users.noreply.github.com> Date: Fri, 11 Aug 2023 12:49:38 -0500 Subject: [PATCH 5/5] Update tunneling-and-forwarding-ports.md --- docs/ai-testbed/cerebras/tunneling-and-forwarding-ports.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ai-testbed/cerebras/tunneling-and-forwarding-ports.md b/docs/ai-testbed/cerebras/tunneling-and-forwarding-ports.md index 4e7cff58d..ef102a266 100644 --- a/docs/ai-testbed/cerebras/tunneling-and-forwarding-ports.md +++ b/docs/ai-testbed/cerebras/tunneling-and-forwarding-ports.md @@ -2,4 +2,4 @@ See ALCF's [Jupyter Instructions](https://github.com/argonne-lcf/ThetaGPU-Docs/blob/master/doc_staging/jupyter.md), and -[Tunneling and forwarding ports](../sambanova_gen2/tunneling-and-forwarding-ports.md) in the **SambaNova** documentation. The Cerebras login nodes are direct login; tunneling and port forwarding do not involve jump hosts. +[Tunneling and forwarding ports](../sambanova_gen2/tunneling-and-forwarding-ports.md). The Cerebras login nodes are direct login; tunneling and port forwarding do not involve jump hosts.
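+
+As a sketch of what a direct, single-hop forward can look like (the port, the Jupyter use case, and the login node name below are placeholders, not site-specific instructions; substitute whatever you actually use):
+
+```console
+# Forward local port 8888 to port 8888 on a Cerebras login node, e.g. for a Jupyter server
+ssh -L 8888:localhost:8888 ALCFUserID@<cerebras-login-node>
+```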