Tutorial "Goal-directed molecule generation": UnboundLocalError: local variable 'sascorer' referenced before assignment #31

@schinto

Description

In the tutorial "Goal-directed molecule generation", the following error occurred during RL fine-tuning:

UnboundLocalError: local variable 'sascorer' referenced before assignment
import torchdrug
print(torchdrug.__version__)

0.1.0

import os
import pickle
import torch
from torchdrug import core, datasets, models, tasks
from collections import defaultdict

# The ZINC250k dataset was built once with datasets.ZINC250k(...) and pickled;
# load the pickled copy here instead of rebuilding it.
# dataset = datasets.ZINC250k("~/Projects/drugs/molecule-datasets/", kekulize=True,
#                             node_feature="symbol")
filename = os.path.expanduser("~/Projects/drugs/molecule-datasets/zinc250k.pkl")
print(f"Loading {filename}")
with open(filename, "rb") as fin:
    dataset = pickle.load(fin)

model = models.RGCN(input_dim=dataset.node_feature_dim,
                    num_relation=dataset.num_bond_type,
                    hidden_dims=[256, 256, 256, 256], batch_norm=False)
task = tasks.GCPNGeneration(model, dataset.atom_types,
                            max_edge_unroll=12, max_node=38,
                            task="plogp", criterion="ppo",
                            reward_temperature=1,
                            agent_update_interval=3, gamma=0.9)

optimizer = torch.optim.Adam(task.parameters(), lr=1e-5)
solver = core.Engine(task, dataset, None, None, optimizer,
                     #gpus=(0,),
                     batch_size=16, log_interval=10)

# Load the GCPN checkpoint pretrained for one epoch in the earlier tutorial step
filename = os.path.expanduser("~/Projects/drugs/graphgeneration/gcpn_zinc250k_1epoch.pkl")
solver.load(filename, load_optimizer=False)

# RL finetuning
solver.train(num_epoch=10)
filename = os.path.expanduser("~/Projects/drugs/graphgeneration/gcpn_zinc250k_1epoch_finetune.pkl")
solver.save(filename)
Full traceback:
---------------------------------------------------------------------------
UnboundLocalError                         Traceback (most recent call last)
/var/folders/zq/yt7gftfj7_x_11psy591dtz00000gn/T/ipykernel_7569/599880889.py in <module>
     39 
     40 # RL finetuning
---> 41 solver.train(num_epoch=10)
     42 filename = os.path.expanduser("~/Projects/drugs/graphgeneration/gcpn_zinc250k_1epoch_finetune.pkl")
     43 solver.save(filename)

~/opt/anaconda3/envs/drugs/lib/python3.8/site-packages/torchdrug/core/engine.py in train(self, num_epoch, batch_per_epoch)
    141                     batch = utils.cuda(batch, device=self.device)
    142 
--> 143                 loss, metric = model(batch)
    144                 if not loss.requires_grad:
    145                     raise RuntimeError("Loss doesn't require grad. Did you define any loss in the task?")

~/opt/anaconda3/envs/drugs/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    887             result = self._slow_forward(*input, **kwargs)
    888         else:
--> 889             result = self.forward(*input, **kwargs)
    890         for hook in itertools.chain(
    891                 _global_forward_hooks.values(),

~/opt/anaconda3/envs/drugs/lib/python3.8/site-packages/torchdrug/tasks/generation.py in forward(self, batch)
    700                 metric.update(_metric)
    701             elif criterion == "ppo":
--> 702                 _loss, _metric = self.reinforce_forward(batch)
    703                 all_loss += _loss * weight
    704                 metric.update(_metric)

~/opt/anaconda3/envs/drugs/lib/python3.8/site-packages/torchdrug/tasks/generation.py in reinforce_forward(self, batch)
    811         for task in self.task:
    812             if task == "plogp":
--> 813                 plogp = metrics.penalized_logP(graph)
    814                 metric["Penalized logP"] = plogp.mean()
    815                 metric["Penalized logP (max)"] = plogp.max()

~/opt/anaconda3/envs/drugs/lib/python3.8/site-packages/torchdrug/metrics/metric.py in penalized_logP(pred)
    117                 Chem.GetSymmSSSR(mol)
    118                 logp = Descriptors.MolLogP(mol)
--> 119                 sa = sascorer.calculateScore(mol)
    120             logp = (logp - logp_mean) / logp_std
    121             sa = (sa - sa_mean) / sa_std

UnboundLocalError: local variable 'sascorer' referenced before assignment
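
A plausible explanation (an assumption on my part, not confirmed from the torchdrug source quoted above) is that penalized_logP imports RDKit's SA_Score contrib module sascorer lazily and the ImportError never surfaces, so the local name is left unassigned when the contrib files are missing from the RDKit installation. The sketch below makes that import explicit so the real failure shows up directly, and puts the contrib directory on sys.path before fine-tuning; SA_Score (sascorer.py plus fpscores.pkl.gz) ships with the conda builds of RDKit.

# Diagnostic sketch, run before solver.train() (assumption: the error is a
# failed lazy import of RDKit's SA_Score contrib module "sascorer" inside
# torchdrug.metrics.penalized_logP).
import os
import sys

from rdkit import Chem
from rdkit.Chem import RDConfig

sa_score_dir = os.path.join(RDConfig.RDContribDir, "SA_Score")
print("SA_Score contrib dir:", sa_score_dir, "exists:", os.path.isdir(sa_score_dir))

# Put the contrib script on the path and import it explicitly so any failure
# is raised here instead of being hidden by a later UnboundLocalError.
sys.path.append(sa_score_dir)
import sascorer

# Sanity check: compute the synthetic accessibility score of benzene.
print("SA score of benzene:", sascorer.calculateScore(Chem.MolFromSmiles("c1ccccc1")))

If the directory does not exist or the import still fails, reinstalling RDKit from a distribution that includes the Contrib tree (e.g. the conda-forge package) is the usual workaround.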
