{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Trigger a run from a notebook"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[NeMo W 2024-08-29 17:14:25 nemo_logging:349] /Users/romeyn/base/code/.venv/lib/python3.10/site-packages/megatron/core/optimizer/__init__.py:18: UserWarning: Transformer Engine and Apex are not installed. Falling back to Torch optimizers.\n",
" warnings.warn(\n",
" \n",
"[NeMo W 2024-08-29 17:14:25 nemo_logging:349] /Users/romeyn/base/code/.venv/lib/python3.10/site-packages/megatron/core/optimizer/clip_grads.py:31: UserWarning: Transformer Engine and Apex are not installed. Falling back to local implementations of multi_tensor_applier, multi_tensor_l2norm, and multi_tensor_scale\n",
" warnings.warn(\n",
" \n"
]
}
],
"source": [
"# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"import nemo_run as run\n",
"from nemo.collections import llm\n",
"from nemo.collections.llm.recipes import llama3_8b\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"image/svg+xml": [
"\n",
"\n",
"\n",
"\n",
"\n"
],
"text/plain": [
")]>,\n",
" data=,\n",
" trainer=],\n",
" devices=8,\n",
" gradient_clip_val=1.0,\n",
" limit_test_batches=50,\n",
" limit_val_batches=32,\n",
" log_every_n_steps=10,\n",
" max_steps=1168251,\n",
" num_nodes=1,\n",
" plugins=,\n",
" strategy=,\n",
" use_distributed_sampler=False,\n",
" val_check_interval=2000)]>,\n",
" log=,\n",
" tensorboard=,\n",
" wandb=None)]>,\n",
" resume=,\n",
" optim=,\n",
" lr_scheduler=)]>)]>"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pretrain = llama3_8b.pretrain_recipe(num_nodes=1, num_gpus_per_node=8)\n",
"\n",
"pretrain"
]
},
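{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is a minimal sketch of launching the recipe with NeMo-Run. It assumes a single local machine with 8 GPUs and uses `run.LocalExecutor` with the `torchrun` launcher; swap in a different executor (for example a Slurm-backed one) to match your environment. `dryrun=True` keeps the cell to printing the resolved configuration instead of starting training."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: launch the recipe locally with NeMo-Run.\n",
"# LocalExecutor runs the job on the machine hosting this notebook;\n",
"# torchrun spawns one task per GPU (8 here, matching the recipe above).\n",
"executor = run.LocalExecutor(ntasks_per_node=8, launcher=\"torchrun\")\n",
"\n",
"# dryrun=True only resolves and prints the configuration; set it to False\n",
"# (or drop the argument) to actually start pretraining.\n",
"run.run(pretrain, executor=executor, dryrun=True)"
]
},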
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
}
},
"nbformat": 4,
"nbformat_minor": 2
}