|
140 | 140 | "* Your subscription id\n",
141 | 141 | "* The resource group name\n",
142 | 142 | "\n",
143 |     | - "**Note**: As with other Azure services, there are limits on certain resources (for eg. BatchAI cluster size) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
    | 143 | + "**Note**: As with other Azure services, there are limits on certain resources (for eg. AmlCompute quota) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota."
144 | 144 | ]
145 | 145 | },
146 | 146 | {
|
|
210 | 210 | "source": [
211 | 211 | "## Create compute resources for your training experiments\n",
212 | 212 | "\n",
213 |     | - "Many of the subsequent examples use BatchAI clusters to train models at scale. To create a **CPU** cluster now, run the cell below. The autoscale settings mean that the cluster will scale down to 0 nodes when inactive and up to 4 nodes when busy."
    | 213 | + "Many of the subsequent examples use Azure Machine Learning managed compute (AmlCompute) to train models at scale. To create a **CPU** cluster now, run the cell below. The autoscale settings mean that the cluster will scale down to 0 nodes when inactive and up to 4 nodes when busy."
214 | 214 | ]
215 | 215 | },
216 | 216 | {
|
|
219 | 219 | "metadata": {},
220 | 220 | "outputs": [],
221 | 221 | "source": [
222 |     | - "from azureml.core.compute import ComputeTarget, BatchAiCompute\n",
    | 222 | + "from azureml.core.compute import ComputeTarget, AmlCompute\n",
223 | 223 | "from azureml.core.compute_target import ComputeTargetException\n",
224 | 224 | "\n",
225 | 225 | "# Choose a name for your CPU cluster\n",
|
|
230 | 230 | "    cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
231 | 231 | "    print('Found existing cluster, use it.')\n",
232 | 232 | "except ComputeTargetException:\n",
233 |     | - "    compute_config = BatchAiCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
234 |     | - "                                                               autoscale_enabled=True,\n",
235 |     | - "                                                               cluster_min_nodes=0,\n",
236 |     | - "                                                               cluster_max_nodes=4)\n",
    | 233 | + "    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',\n",
    | 234 | + "                                                           max_nodes=4)\n",
237 | 235 | "    cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
238 | 236 | "\n",
239 | 237 | "cpu_cluster.wait_for_completion(show_output=True)"
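The `+` lines above drop the explicit autoscale arguments, relying on AmlCompute's defaults. A minimal sketch of what the updated cell amounts to with those defaults written out, assuming the `min_nodes` parameter of `AmlCompute.provisioning_configuration`; the `cpucluster` name is a hypothetical stand-in for the `cpu_cluster_name` assignment elided from this diff, and `ws` is the Workspace object created earlier in the notebook:

```python
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

# Hypothetical name; the actual notebook sets cpu_cluster_name in lines
# not shown in this diff.
cpu_cluster_name = "cpucluster"

try:
    # Reuse an existing cluster with this name if the workspace has one
    cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(
        vm_size='STANDARD_D2_V2',
        min_nodes=0,    # assumed default: scale down to zero when idle
        max_nodes=4)    # scale up to four nodes when busy
    cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)

# Block until provisioning finishes, streaming progress to stdout
cpu_cluster.wait_for_completion(show_output=True)
```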
|
|
252 | 250 | "metadata": {},
253 | 251 | "outputs": [],
254 | 252 | "source": [
255 |     | - "from azureml.core.compute import ComputeTarget, BatchAiCompute\n",
    | 253 | + "from azureml.core.compute import ComputeTarget, AmlCompute\n",
256 | 254 | "from azureml.core.compute_target import ComputeTargetException\n",
257 | 255 | "\n",
258 | 256 | "# Choose a name for your GPU cluster\n",
|
|
263 | 261 | "    gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)\n",
264 | 262 | "    print('Found existing cluster, use it.')\n",
265 | 263 | "except ComputeTargetException:\n",
266 |     | - "    compute_config = BatchAiCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n",
267 |     | - "                                                               autoscale_enabled=True,\n",
268 |     | - "                                                               cluster_min_nodes=0,\n",
269 |     | - "                                                               cluster_max_nodes=4)\n",
    | 264 | + "    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n",
    | 265 | + "                                                           max_nodes=4)\n",
270 | 266 | "    gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)\n",
271 | 267 | "\n",
272 | 268 | "gpu_cluster.wait_for_completion(show_output=True)"
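Both cells block on `wait_for_completion`, so the clusters exist once they return. A short follow-up sketch for inspecting and tearing them down after the experiments finish, assuming the `get_status()` and `delete()` methods on the `azureml.core` compute objects created above (not part of this commit):

```python
# Inspect the provisioning/scale state of each cluster
print(cpu_cluster.get_status().serialize())
print(gpu_cluster.get_status().serialize())

# Delete the clusters for permanent cleanup; with a scale-to-zero floor an
# idle cluster holds no nodes, so deletion mainly releases the quota.
cpu_cluster.delete()
gpu_cluster.delete()
```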
|
|
309 | 305 | "name": "python",
310 | 306 | "nbconvert_exporter": "python",
311 | 307 | "pygments_lexer": "ipython3",
312 |     | - "version": "3.6.6"
    | 308 | + "version": "3.6.2"
313 | 309 | }
314 | 310 | },
315 | 311 | "nbformat": 4,
|
|