Skip to content

Type error when I run vae.train() #2246

@Huiflorazhan

Description

@Huiflorazhan

When I run the following code, I get a TypeError.
I'm using a Mac to run the code, and I don't know how to solve this problem. Could you help me with this? Thank you in advance.

#scvi.model.SCVI.setup_anndata(adata)
#vae = scvi.model.SCVI(adata)
#vae.train()


TypeError Traceback (most recent call last)
Cell In[16], line 3
1 scvi.model.SCVI.setup_anndata(adata)
2 vae = scvi.model.SCVI(adata)
----> 3 vae.train()

File ~/anaconda3/.conda/lib/python3.11/site-packages/scvi/model/base/_training_mixin.py:78, in UnsupervisedTrainingMixin.train(self, max_epochs, use_gpu, accelerator, devices, train_size, validation_size, shuffle_set_split, batch_size, early_stopping, plan_kwargs, **trainer_kwargs)
74 es = "early_stopping"
75 trainer_kwargs[es] = (
76 early_stopping if es not in trainer_kwargs.keys() else trainer_kwargs[es]
77 )
---> 78 runner = self._train_runner_cls(
79 self,
80 training_plan=training_plan,
81 data_splitter=data_splitter,
82 max_epochs=max_epochs,
83 use_gpu=use_gpu,
84 accelerator=accelerator,
85 devices=devices,
86 **trainer_kwargs,
87 )
88 return runner()

File ~/anaconda3/.conda/lib/python3.11/site-packages/scvi/train/_trainrunner.py:85, in TrainRunner.__init__(self, model, training_plan, data_splitter, max_epochs, use_gpu, accelerator, devices, **trainer_kwargs)
83 self.lightning_devices = lightning_devices
84 self.device = device
---> 85 self.trainer = self._trainer_cls(
86 max_epochs=max_epochs,
87 accelerator=accelerator,
88 devices=lightning_devices,
89 **trainer_kwargs,
90 )

File ~/anaconda3/.conda/lib/python3.11/site-packages/scvi/train/_trainer.py:139, in Trainer.__init__(self, accelerator, devices, benchmark, check_val_every_n_epoch, max_epochs, default_root_dir, enable_checkpointing, num_sanity_val_steps, enable_model_summary, early_stopping, early_stopping_monitor, early_stopping_min_delta, early_stopping_patience, early_stopping_mode, enable_progress_bar, progress_bar_refresh_rate, simple_progress_bar, logger, log_every_n_steps, **kwargs)
136 if logger is None:
137 logger = SimpleLogger()
--> 139 super().__init__(
140 accelerator=accelerator,
141 devices=devices,
142 benchmark=benchmark,
143 check_val_every_n_epoch=check_val_every_n_epoch,
144 max_epochs=max_epochs,
145 default_root_dir=default_root_dir,
146 enable_checkpointing=enable_checkpointing,
147 num_sanity_val_steps=num_sanity_val_steps,
148 enable_model_summary=enable_model_summary,
149 logger=logger,
150 log_every_n_steps=log_every_n_steps,
151 enable_progress_bar=enable_progress_bar,
152 **kwargs,
153 )

File ~/anaconda3/.conda/lib/python3.11/site-packages/lightning/pytorch/utilities/argparse.py:70, in _defaults_from_env_vars.<locals>.insert_env_defaults(self, *args, **kwargs)
67 kwargs = dict(list(env_variables.items()) + list(kwargs.items()))
69 # all args were already moved to kwargs
---> 70 return fn(self, **kwargs)

File ~/anaconda3/.conda/lib/python3.11/site-packages/lightning/pytorch/trainer/trainer.py:399, in Trainer.__init__(self, accelerator, strategy, devices, num_nodes, precision, logger, callbacks, fast_dev_run, max_epochs, min_epochs, max_steps, min_steps, max_time, limit_train_batches, limit_val_batches, limit_test_batches, limit_predict_batches, overfit_batches, val_check_interval, check_val_every_n_epoch, num_sanity_val_steps, log_every_n_steps, enable_checkpointing, enable_progress_bar, enable_model_summary, accumulate_grad_batches, gradient_clip_val, gradient_clip_algorithm, deterministic, benchmark, inference_mode, use_distributed_sampler, profiler, detect_anomaly, barebones, plugins, sync_batchnorm, reload_dataloaders_every_n_epochs, default_root_dir)
396 # init connectors
397 self._data_connector = _DataConnector(self)
--> 399 self._accelerator_connector = _AcceleratorConnector(
400 devices=devices,
401 accelerator=accelerator,
402 strategy=strategy,
403 num_nodes=num_nodes,
404 sync_batchnorm=sync_batchnorm,
405 benchmark=benchmark,
406 use_distributed_sampler=use_distributed_sampler,
407 deterministic=deterministic,
408 precision=precision,
409 plugins=plugins,
410 )
411 self._logger_connector = _LoggerConnector(self)
412 self._callback_connector = _CallbackConnector(self)

File ~/anaconda3/.conda/lib/python3.11/site-packages/lightning/pytorch/trainer/connectors/accelerator_connector.py:157, in _AcceleratorConnector.__init__(self, devices, num_nodes, accelerator, strategy, plugins, precision, sync_batchnorm, benchmark, use_distributed_sampler, deterministic)
154 self._accelerator_flag = self._choose_gpu_accelerator_backend()
156 self._check_device_config_and_set_final_flags(devices=devices, num_nodes=num_nodes)
--> 157 self._set_parallel_devices_and_init_accelerator()
159 # 3. Instantiate ClusterEnvironment
160 self.cluster_environment: ClusterEnvironment = self._choose_and_init_cluster_environment()

File ~/anaconda3/.conda/lib/python3.11/site-packages/lightning/pytorch/trainer/connectors/accelerator_connector.py:390, in _AcceleratorConnector._set_parallel_devices_and_init_accelerator(self)
382 raise MisconfigurationException(
383 f"{accelerator_cls.__qualname__} can not run on your system"
384 " since the accelerator is not available. The following accelerator(s)"
385 " is available and can be passed into accelerator argument of"
386 f" Trainer: {available_accelerator}."
387 )
389 self._set_devices_flag_if_auto_passed()
--> 390 self._devices_flag = accelerator_cls.parse_devices(self._devices_flag)
391 if not self._parallel_devices:
392 self._parallel_devices = accelerator_cls.get_parallel_devices(self._devices_flag)

File ~/anaconda3/.conda/lib/python3.11/site-packages/lightning/pytorch/accelerators/cpu.py:48, in CPUAccelerator.parse_devices(devices)
45 @staticmethod
46 def parse_devices(devices: Union[int, str, List[int]]) -> int:
47 """Accelerator device parsing logic."""
---> 48 return _parse_cpu_cores(devices)

File ~/anaconda3/.conda/lib/python3.11/site-packages/lightning/fabric/accelerators/cpu.py:85, in _parse_cpu_cores(cpu_cores)
82 cpu_cores = int(cpu_cores)
84 if not isinstance(cpu_cores, int) or cpu_cores <= 0:
---> 85 raise TypeError("devices selected with CPUAccelerator should be an int > 0.")
87 return cpu_cores

TypeError: devices selected with CPUAccelerator should be an int > 0.

Metadata

Metadata

Assignees

No one assigned

    Labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions