
progentrl's Introduction

Hi there, I'm Bibyutatsu AKA Bibhash

I'm a Data Scientist from India

  • Currently working on my personal project proGENTRL
  • Fascinated by GANs; PyTorch is love
  • Wanna know more? Click here
  • Ask me anything here


Shoutout to @anuraghazra for his awesome repo github-readme-stats; do check it out.

progentrl's People

Contributors

bibyutatsu


Forkers

28yogesh

progentrl's Issues

model performance

Hello, thanks for your code! Here is one question: is the final sampling result (some SMILES) in your Example.ipynb sampled from the model after complete training? I notice that many of them are invalid.
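One common way to quantify this (not part of the example notebook itself; RDKit is an extra dependency) is to parse each sampled SMILES with RDKit and report the valid fraction. A minimal sketch, assuming the sampling step yields a plain list of SMILES strings:

from rdkit import Chem

def valid_fraction(sampled):
    # Chem.MolFromSmiles returns None for SMILES that do not parse
    valid = [s for s in sampled if Chem.MolFromSmiles(s) is not None]
    return valid, len(valid) / max(len(sampled), 1)

# hypothetical usage, assuming the notebook's sampling call:
# sampled = model.sample(100)
# valid, frac = valid_fraction(sampled)
# print(f"{frac:.1%} of the sampled SMILES are valid")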

Please help how to solve the KeyError in this code.

trainer.fit(model) # Training the model

LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]

| Name | Type | Params

0 | enc | RNNEncoder | 888 K
1 | dec | DilConvDecoder | 596 K
2 | lp | LP | 1.6 M

3.1 M Trainable params
0 Non-trainable params
3.1 M Total params
12.473 Total estimated model params size (MB)

Epoch 0: 0%
0/5333 [00:00<?, ?it/s]


KeyError Traceback (most recent call last)
/tmp/ipykernel_679832/1077659130.py in <module>
----> 1 trainer.fit(model) # Training the model

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
518 model = _maybe_unwrap_optimized(model)
519 self.strategy._lightning_module = model
--> 520 call._call_and_handle_interrupt(
521 self, self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
522 )

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py in _call_and_handle_interrupt(trainer, trainer_fn, *args, **kwargs)
42 return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
43 else:
---> 44 return trainer_fn(*args, **kwargs)
45
46 except _TunerExitException:

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
557 model_connected=self.lightning_module is not None,
558 )
--> 559 self._run(model, ckpt_path=ckpt_path)
560
561 assert self.state.stopped

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
933 # RUN THE TRAINER
934 # ----------------------------
--> 935 results = self._run_stage()
936
937 # ----------------------------

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py in _run_stage(self)
976 self._run_sanity_check()
977 with torch.autograd.set_detect_anomaly(self._detect_anomaly):
--> 978 self.fit_loop.run()
979 return None
980 raise RuntimeError(f"Unexpected state {self.state}")

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/fit_loop.py in run(self)
199 try:
200 self.on_advance_start()
--> 201 self.advance()
202 self.on_advance_end()
203 self._restarting = False

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/fit_loop.py in advance(self)
352 self._data_fetcher.setup(combined_loader)
353 with self.trainer.profiler.profile("run_training_epoch"):
--> 354 self.epoch_loop.run(self._data_fetcher)
355
356 def on_advance_end(self) -> None:

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/training_epoch_loop.py in run(self, data_fetcher)
131 while not self.done:
132 try:
--> 133 self.advance(data_fetcher)
134 self.on_advance_end()
135 self._restarting = False

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/training_epoch_loop.py in advance(self, data_fetcher)
216 if trainer.lightning_module.automatic_optimization:
217 # in automatic optimization, there can only be one optimizer
--> 218 batch_output = self.automatic_optimization.run(trainer.optimizers[0], kwargs)
219 else:
220 batch_output = self.manual_optimization.run(kwargs)

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py in run(self, optimizer, kwargs)
183 # gradient update with accumulated gradients
184 else:
--> 185 self._optimizer_step(kwargs.get("batch_idx", 0), closure)
186
187 result = closure.consume_result()

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py in _optimizer_step(self, batch_idx, train_step_and_backward_closure)
259
260 # model hook
--> 261 call._call_lightning_module_hook(
262 trainer,
263 "optimizer_step",

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py in _call_lightning_module_hook(trainer, hook_name, pl_module, *args, **kwargs)
140
141 with trainer.profiler.profile(f"[LightningModule]{pl_module.__class__.__name__}.{hook_name}"):
--> 142 output = fn(*args, **kwargs)
143
144 # restore current_fx when nested context

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/core/module.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure)
1263 pg["lr"] = lr_scale * self.learning_rate
1264 """
-> 1265 optimizer.step(closure=optimizer_closure)
1266
1267 def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer) -> None:

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/core/optimizer.py in step(self, closure, **kwargs)
156
157 assert self._strategy is not None
--> 158 step_output = self._strategy.optimizer_step(self._optimizer, closure, **kwargs)
159
160 self._on_after_step()

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/strategies/strategy.py in optimizer_step(self, optimizer, closure, model, **kwargs)
222 # TODO(fabric): remove assertion once strategy's optimizer_step typing is fixed
223 assert isinstance(model, pl.LightningModule)
--> 224 return self.precision_plugin.optimizer_step(optimizer, model=model, closure=closure, **kwargs)
225
226 def _setup_model_and_optimizers(self, model: Module, optimizers: List[Optimizer]) -> Tuple[Module, List[Optimizer]]:

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/plugins/precision/precision_plugin.py in optimizer_step(self, optimizer, model, closure, **kwargs)
112 """Hook to run the optimizer step."""
113 closure = partial(self._wrap_closure, model, optimizer, closure)
--> 114 return optimizer.step(closure=closure, **kwargs)
115
116 def _clip_gradients(

~/anaconda3/lib/python3.9/site-packages/torch/optim/optimizer.py in wrapper(*args, **kwargs)
278 f"but got {result}.")
279
--> 280 out = func(*args, **kwargs)
281 self._optimizer_step_code()
282

~/anaconda3/lib/python3.9/site-packages/torch/optim/optimizer.py in _use_grad(self, *args, **kwargs)
31 try:
32 torch.set_grad_enabled(self.defaults['differentiable'])
---> 33 ret = func(self, *args, **kwargs)
34 finally:
35 torch.set_grad_enabled(prev_grad)

~/anaconda3/lib/python3.9/site-packages/torch/optim/adam.py in step(self, closure)
119 if closure is not None:
120 with torch.enable_grad():
--> 121 loss = closure()
122
123 for group in self.param_groups:

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/plugins/precision/precision_plugin.py in _wrap_closure(self, model, optimizer, closure)
99 consistent with the PrecisionPlugin subclasses that cannot pass optimizer.step(closure) directly.
100 """
--> 101 closure_result = closure()
102 self._after_closure(model, optimizer)
103 return closure_result

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py in __call__(self, *args, **kwargs)
138
139 def __call__(self, *args: Any, **kwargs: Any) -> Optional[Tensor]:
--> 140 self._result = self.closure(*args, **kwargs)
141 return self._result.loss
142

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py in closure(self, *args, **kwargs)
124
125 def closure(self, *args: Any, **kwargs: Any) -> ClosureResult:
--> 126 step_output = self._step_fn()
127
128 if step_output.closure_loss is None:

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py in _training_step(self, kwargs)
306
307 # manually capture logged metrics
--> 308 training_step_output = call._call_strategy_hook(trainer, "training_step", *kwargs.values())
309 self.trainer.strategy.post_training_step()
310

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py in _call_strategy_hook(trainer, hook_name, *args, **kwargs)
286
287 with trainer.profiler.profile(f"[Strategy]{trainer.strategy.__class__.__name__}.{hook_name}"):
--> 288 output = fn(*args, **kwargs)
289
290 # restore current_fx when nested context

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/strategies/strategy.py in training_step(self, *args, **kwargs)
364 with self.precision_plugin.train_step_context():
365 assert isinstance(self.model, TrainingStep)
--> 366 return self.model.training_step(*args, **kwargs)
367
368 def post_training_step(self) -> None:

~/Desktop/user/new_molecule/progentrl/gen_vaelp.py in training_step(self, batch, batch_idx)
133 def training_step(self, batch, batch_idx):
134 if self.current_epoch in [0, 1, 5] and batch_idx==0:
--> 135 self.reinit_from_data()
136
137 x_batch, y_batch = batch

~/Desktop/user/new_molecule/progentrl/gen_vaelp.py in reinit_from_data(self)
102
103 if (self.buf is None) or (self.buf.shape[0] < 5000):
--> 104 enc_out = self.enc(x_batch)
105 means, log_stds = torch.split(enc_out,
106 len(self.latent_descr),

~/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
1499 or _global_backward_pre_hooks or _global_backward_hooks
1500 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501 return forward_call(*args, **kwargs)
1502 # Do not call functions when jit is used
1503 full_backward_hooks, non_full_backward_hooks = [], []

~/Desktop/user/new_molecule/progentrl/encoder.py in forward(self, sm_list)
24 """
25
---> 26 tokens, lens = encode(sm_list)
27 to_feed = tokens.transpose(1, 0).to(self.embs.weight.device)
28

~/Desktop/user/new_molecule/progentrl/tokenizer.py in encode(sm_list, pad_size)
62 lens = []
63 for s in sm_list:
---> 64 tokens = ([1] + [__t2i[tok]
65 for tok in smiles_tokenizer(s)])[:pad_size - 1]
66 lens.append(len(tokens))

~/Desktop/user/new_molecule/progentrl/tokenizer.py in <listcomp>(.0)
62 lens = []
63 for s in sm_list:
---> 64 tokens = ([1] + [__t2i[tok]
65 for tok in smiles_tokenizer(s)])[:pad_size - 1]
66 lens.append(len(tokens))

KeyError: '7'
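The KeyError is raised inside progentrl/tokenizer.py: every token is looked up in a fixed vocabulary (__t2i), and '7' is not one of its entries, so any SMILES containing that character cannot be encoded. A quick diagnostic, sketched under the assumption that the progentrl package is importable exactly as in the traceback, is to run the same encode() over the dataset one molecule at a time and collect the offenders:

from progentrl.tokenizer import encode

def find_unsupported(smiles_list):
    # Wrap the repo's own encode() per molecule so the missing token is
    # reported instead of crashing mid-epoch.
    bad = []
    for s in smiles_list:
        try:
            encode([s])
        except KeyError as e:
            bad.append((s, e.args[0]))  # (SMILES, token not in the vocabulary)
    return bad

Molecules flagged this way have to be removed (or the vocabulary extended) before training.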

AttributeError: 'NoneType' object has no attribute 'buf'

torch.cuda.is_available() ---> True

But it still fails with the following error. Kindly help.

Cannot Load the Models: Enc Model not Found

AttributeError Traceback (most recent call last)
in <module>
5 train_loader,
6 latent_descr,
----> 7 feature_descr, load_model='./model/')

~\Desktop\proGENTRL-master\proGENTRL-master\progentrl\gen_vaelp.py in __init__(self, enc, dec, train_loader, latent_descr, feature_descr, tt_int, tt_type, beta, gamma, load_model)
53 self = load(self, load_model)
54
---> 55 self.buf = None
56
57 def get_elbo(self, x, y):

AttributeError: 'NoneType' object has no attribute 'buf'
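Reading the traceback, the load helper apparently could not find the saved encoder under ./model/ (hence the "Cannot Load the Models: Enc Model not Found" message), returned None, and line 53 (self = load(self, load_model)) rebound self to that None before line 55 hit self.buf = None. A hedged workaround, using the gentrl.gentrlVAE call from the "running errors" issue below and assuming load_model can simply be left out when there are no saved weights yet:

import os

model_dir = './model/'
if os.path.isdir(model_dir) and os.listdir(model_dir):
    # restore from the saved weights
    model = gentrl.gentrlVAE(enc, dec, train_loader,
                             latent_descr, feature_descr,
                             load_model=model_dir)
else:
    # no saved weights yet: build the model from scratch
    model = gentrl.gentrlVAE(enc, dec, train_loader,
                             latent_descr, feature_descr)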

About pIC50 value

import torch
from torch.utils.data import Dataset
import pandas as pd
import numpy as np

class MolecularDataset(Dataset):
    def __init__(self, sources=[], props=['logIC50', 'BFL', 'pipeline'],
                 with_missings=False):
        self.num_sources = len(sources)


Where is the logIC50 value used as input in the code, and where is the dataset with pIC50 values?
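For context, pIC50 is -log10(IC50 in molar), so a logIC50 column is just a pIC50 column with the sign flipped; the repository does not appear to ship a dataset with these columns, so you provide your own CSV. A hypothetical sketch of preparing one (the column names and the source-dict keys are assumptions; check dataloader.py for the keys MolecularDataset actually expects):

import pandas as pd

df = pd.read_csv('my_activity_data.csv')   # assumed columns: SMILES, pIC50
df['logIC50'] = -df['pIC50']               # pIC50 = -log10(IC50 [M])
df[['SMILES', 'logIC50']].to_csv('train_logic50.csv', index=False)

# hypothetical wiring into the dataset class:
# md = MolecularDataset(sources=[{'path': 'train_logic50.csv',
#                                 'smiles': 'SMILES',
#                                 'prob': 1,
#                                 'logIC50': 'logIC50'}],
#                       props=['logIC50'])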

How to incorporate any target specific molecules?

Hi Bibyutatsu,

Thanks for this wonderful script. I am new to the AI world and need your help. I am able to run the GENTRL code successfully and get new designs from the MOSES input dataset. Now I want to add a dataset of target-specific molecules and produce new designs like those in the paper (Deep learning enables rapid identification of potent DDR1 kinase inhibitors), where the model was trained on the ZINC dataset followed by DDR1 and common-kinase datasets along with pIC50 values.
When I use another curated dataset of target-specific molecules instead of MOSES, I get the error shown below.
Where and how do I add a new dataset to get the expected designs?

Epoch 0 :
!!!


KeyError Traceback (most recent call last)
in <module>
----> 1 model.train_as_vaelp(train_loader, lr=1e-4)
2 get_ipython().system(' mkdir -p saved_gentrl')
3 model.save('./saved_gentrl/')

~/GENTRL/gentrl/gentrl.py in train_as_vaelp(self, train_loader, num_epochs, verbose_step, lr)
150 if to_reinit:
151 if (buf is None) or (buf.shape[0] < 5000):
--> 152 enc_out = self.enc.encode(x_batch)
153 means, log_stds = torch.split(enc_out,
154 len(self.latent_descr),

~/GENTRL/gentrl/encoder.py in encode(self, sm_list)
24 """
25
---> 26 tokens, lens = encode(sm_list)
27 to_feed = tokens.transpose(1, 0).to(self.embs.weight.device)
28

~/GENTRL/gentrl/tokenizer.py in encode(sm_list, pad_size)
63 for s in sm_list:
64 tokens = ([1] + [__t2i[tok]
---> 65 for tok in smiles_tokenizer(s)])[:pad_size - 1]
66 lens.append(len(tokens))
67 tokens += (pad_size - len(tokens)) * [2]

~/GENTRL/gentrl/tokenizer.py in <listcomp>(.0)
63 for s in sm_list:
64 tokens = ([1] + [__t2i[tok]
---> 65 for tok in smiles_tokenizer(s)])[:pad_size - 1]
66 lens.append(len(tokens))
67 tokens += (pad_size - len(tokens)) * [2]

KeyError: '+'

Will you please help me out with this?
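This is the same tokenizer limitation as in the KeyError: '7' issue above: the vocabulary in gentrl/tokenizer.py is fixed, and '+' (as in charged atoms such as [N+]) is not in it, which is exactly what the KeyError reports. A pragmatic workaround is to drop, or neutralise beforehand, the molecules the tokenizer cannot encode; extending the vocabulary and retraining is the other option. A hedged sketch, assuming your curated set is a CSV with a SMILES column:

import pandas as pd
from gentrl.tokenizer import encode

def encodable(smiles):
    # True if the GENTRL tokenizer can encode this SMILES without a KeyError
    try:
        encode([smiles])
        return True
    except KeyError:
        return False

df = pd.read_csv('curated_targets.csv')        # assumed column name: SMILES
df = df[df['SMILES'].apply(encodable)]
df.to_csv('curated_targets_clean.csv', index=False)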

running errors

After I run this cell
model = gentrl.gentrlVAE(enc,
                         dec,
                         train_loader,
                         latent_descr,
                         feature_descr, load_model='./model/')

I got
Cannot Load the Models: Enc Model not Found


AttributeError Traceback (most recent call last)
in <module>
3 train_loader,
4 latent_descr,
----> 5 feature_descr, load_model='./model/')

/mnt/SSD/projects/proGENTRL/progentrl/gen_vaelp.py in __init__(self, enc, dec, train_loader, latent_descr, feature_descr, tt_int, tt_type, beta, gamma, load_model)
54
55 #Epoch Variables
---> 56 self.buf = None
57
58 def get_elbo(self, x, y):

AttributeError: 'NoneType' object has no attribute 'buf'
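Same root cause as the "Cannot Load the Models: Enc Model not Found" issue above: with load_model='./model/' and no saved encoder weights in that directory, the load helper returns None and __init__ fails at self.buf = None. For a first run, train from scratch and only pass load_model once saved weights actually exist; a minimal sketch (whether load_model can simply be omitted like this is an assumption based on the constructor signature in the traceback):

import pytorch_lightning as pl

# first run: no load_model, so nothing is restored
model = gentrl.gentrlVAE(enc, dec, train_loader,
                         latent_descr, feature_descr)

trainer = pl.Trainer(max_epochs=1)   # settings as in the example notebook, presumably
trainer.fit(model)                   # Training the model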
