MMACE Paper: Recurrent Neural Network for Predicting Solubility

Hide code cell source
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
import numpy as np
import tensorflow as tf
import selfies as sf
import exmol
from dataclasses import dataclass
from rdkit.Chem.Draw import rdDepictor

# Prefer RDKit's CoordGen library for cleaner 2D molecule layouts.
rdDepictor.SetPreferCoordGen(True)
# Notebook-wide seaborn/matplotlib styling for every figure below.
sns.set_context("notebook")
sns.set_style(
    "dark",
    {
        "xtick.bottom": True,
        "ytick.left": True,
        "xtick.color": "#666666",
        "ytick.color": "#666666",
        "axes.edgecolor": "#666666",
        "axes.linewidth": 0.8,
        "figure.dpi": 300,
    },
)
# Custom color cycle applied globally via rcParams.
color_cycle = ["#1BBC9B", "#F06060", "#5C4B51", "#F3B562", "#6e5687"]
mpl.rcParams["axes.prop_cycle"] = mpl.cycler(color=color_cycle)
# Curated solubility dataset; the SMILES and Solubility columns are used below.
soldata = pd.read_csv(
    "https://github.com/whitead/dmol-book/raw/main/data/curated-solubility-dataset.csv"
)
# Column index where numeric molecular descriptors start (not used in this chunk).
features_start_at = list(soldata.columns).index("MolWt")
# Fix NumPy's RNG so any NumPy-based sampling is reproducible.
np.random.seed(0)
2023-12-04 18:06:56.198364: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.
2023-12-04 18:06:56.236221: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.
2023-12-04 18:06:56.236948: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-12-04 18:06:56.945141: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
---------------------------------------------------------------------------
ImportError                               Traceback (most recent call last)
Cell In[1], line 10
      8 import tensorflow as tf
      9 import selfies as sf
---> 10 import exmol
     11 from dataclasses import dataclass
     12 from rdkit.Chem.Draw import rdDepictor

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/exmol/__init__.py:3
      1 from .version import __version__
      2 from . import stoned
----> 3 from .exmol import *
      4 from .data import *
      5 from .stoned import sanitize_smiles

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/exmol/exmol.py:30
     28 from rdkit.Chem import rdchem  # type: ignore
     29 from rdkit.DataStructs.cDataStructs import BulkTanimotoSimilarity, TanimotoSimilarity  # type: ignore
---> 30 import langchain.llms as llms
     31 import langchain.prompts as prompts
     33 from . import stoned

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/llms/__init__.py:22
      1 """
      2 **LLM** classes provide
      3 access to the large language model (**LLM**) APIs and services.
   (...)
     18     AIMessage, BaseMessage
     19 """  # noqa: E501
     20 from typing import Any, Callable, Dict, Type
---> 22 from langchain.llms.base import BaseLLM
     25 def _import_ai21() -> Any:
     26     from langchain.llms.ai21 import AI21

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain/llms/base.py:2
      1 # Backwards compatibility.
----> 2 from langchain_core.language_models import BaseLanguageModel
      3 from langchain_core.language_models.llms import (
      4     LLM,
      5     BaseLLM,
   (...)
      9     update_cache,
     10 )
     12 __all__ = [
     13     "create_base_retry_decorator",
     14     "get_prompts",
   (...)
     19     "LLM",
     20 ]

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain_core/language_models/__init__.py:7
      1 from langchain_core.language_models.base import (
      2     BaseLanguageModel,
      3     LanguageModelInput,
      4     LanguageModelOutput,
      5     get_tokenizer,
      6 )
----> 7 from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel
      8 from langchain_core.language_models.llms import LLM, BaseLLM
     10 __all__ = [
     11     "BaseLanguageModel",
     12     "BaseChatModel",
   (...)
     18     "LanguageModelOutput",
     19 ]

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain_core/language_models/chat_models.py:20
      7 from functools import partial
      8 from typing import (
      9     TYPE_CHECKING,
     10     Any,
   (...)
     17     cast,
     18 )
---> 20 from langchain_core.callbacks import (
     21     AsyncCallbackManager,
     22     AsyncCallbackManagerForLLMRun,
     23     BaseCallbackManager,
     24     CallbackManager,
     25     CallbackManagerForLLMRun,
     26     Callbacks,
     27 )
     28 from langchain_core.globals import get_llm_cache
     29 from langchain_core.language_models.base import BaseLanguageModel, LanguageModelInput

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain_core/callbacks/__init__.py:13
      1 from langchain_core.callbacks.base import (
      2     AsyncCallbackHandler,
      3     BaseCallbackHandler,
   (...)
     11     ToolManagerMixin,
     12 )
---> 13 from langchain_core.callbacks.manager import (
     14     AsyncCallbackManager,
     15     AsyncCallbackManagerForChainGroup,
     16     AsyncCallbackManagerForChainRun,
     17     AsyncCallbackManagerForLLMRun,
     18     AsyncCallbackManagerForRetrieverRun,
     19     AsyncCallbackManagerForToolRun,
     20     AsyncParentRunManager,
     21     AsyncRunManager,
     22     BaseRunManager,
     23     CallbackManager,
     24     CallbackManagerForChainGroup,
     25     CallbackManagerForChainRun,
     26     CallbackManagerForLLMRun,
     27     CallbackManagerForRetrieverRun,
     28     CallbackManagerForToolRun,
     29     ParentRunManager,
     30     RunManager,
     31 )
     32 from langchain_core.callbacks.stdout import StdOutCallbackHandler
     33 from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langchain_core/callbacks/manager.py:26
      9 from typing import (
     10     TYPE_CHECKING,
     11     Any,
   (...)
     22     cast,
     23 )
     24 from uuid import UUID
---> 26 from langsmith.run_helpers import get_run_tree_context
     27 from tenacity import RetryCallState
     29 from langchain_core.callbacks.base import (
     30     BaseCallbackHandler,
     31     BaseCallbackManager,
   (...)
     37     ToolManagerMixin,
     38 )

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langsmith/__init__.py:10
      6 except metadata.PackageNotFoundError:
      7     # Case where package metadata is not available.
      8     __version__ = ""
---> 10 from langsmith.client import Client
     11 from langsmith.evaluation.evaluator import EvaluationResult, RunEvaluator
     12 from langsmith.run_helpers import trace, traceable

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langsmith/client.py:43
     41 from langsmith import schemas as ls_schemas
     42 from langsmith import utils as ls_utils
---> 43 from langsmith.evaluation import evaluator as ls_evaluator
     45 if TYPE_CHECKING:
     46     import pandas as pd

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langsmith/evaluation/__init__.py:4
      1 """Evaluation Helpers."""
      3 from langsmith.evaluation.evaluator import EvaluationResult, RunEvaluator
----> 4 from langsmith.evaluation.string_evaluator import StringEvaluator
      6 __all__ = ["EvaluationResult", "RunEvaluator", "StringEvaluator"]

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/langsmith/evaluation/string_evaluator.py:3
      1 from typing import Callable, Dict, Optional
----> 3 from pydantic import BaseModel
      5 from langsmith.evaluation.evaluator import EvaluationResult, RunEvaluator
      6 from langsmith.schemas import Example, Run

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/pydantic/__init__.py:372, in __getattr__(attr_name)
    370     return import_module(f'.{attr_name}', package=package)
    371 else:
--> 372     module = import_module(module_name, package=package)
    373     return getattr(module, attr_name)

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/importlib/__init__.py:127, in import_module(name, package)
    125             break
    126         level += 1
--> 127 return _bootstrap._gcd_import(name[level:], package, level)

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/pydantic/main.py:15
     12 import typing_extensions
     13 from pydantic_core import PydanticUndefined
---> 15 from ._internal import (
     16     _config,
     17     _decorators,
     18     _fields,
     19     _forward_ref,
     20     _generics,
     21     _mock_val_ser,
     22     _model_construction,
     23     _repr,
     24     _typing_extra,
     25     _utils,
     26 )
     27 from ._migration import getattr_migration
     28 from .annotated_handlers import GetCoreSchemaHandler, GetJsonSchemaHandler

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/pydantic/_internal/_decorators.py:15
     12 from typing_extensions import Literal, TypeAlias, is_typeddict
     14 from ..errors import PydanticUserError
---> 15 from ._core_utils import get_type_ref
     16 from ._internal_dataclass import slots_true
     17 from ._typing_extra import get_function_type_hints

File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/pydantic/_internal/_core_utils.py:16
     14 from pydantic_core import CoreSchema, core_schema
     15 from pydantic_core import validate_core_schema as _validate_core_schema
---> 16 from typing_extensions import TypeAliasType, TypeGuard, get_args, get_origin
     18 from . import _repr
     19 from ._typing_extra import is_generic_alias

ImportError: cannot import name 'TypeAliasType' from 'typing_extensions' (/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/typing_extensions.py)
# REDUCED Data FOR CI
# Subsample 10% of the rows (fixed seed) so the notebook finishes quickly in CI.
soldata = soldata.sample(frac=0.1, random_state=0).reset_index(drop=True)
soldata.head()
# Encode each SMILES as a SELFIES string. Failures are stored as None so that
# selfies_list stays positionally aligned with the rows of soldata.
selfies_list = []
for s in soldata.SMILES:
    try:
        # sanitize_smiles returns a tuple whose element 1 is the cleaned
        # SMILES -- assumption based on the indexing here; confirm vs exmol docs.
        selfies_list.append(sf.encoder(exmol.sanitize_smiles(s)[1]))
    except sf.EncoderError:
        selfies_list.append(None)
len(selfies_list)
# Build the token vocabulary: the "[nop]" padding token at index 0, followed
# by the union of exmol's basic SELFIES alphabet and every token that occurs
# in the (successfully encoded) training data.
basic = set(exmol.get_basic_alphabet())
data_vocab = set(
    sf.get_alphabet_from_selfies([s for s in selfies_list if s is not None])
)
# Index 0 is reserved for "[nop]" padding (matches mask_zero=True / pad value 0).
vocab = ["[nop]"]
# Sort so the token -> index mapping is deterministic across interpreter runs;
# iterating the raw set is hash-randomized, which would make any saved model
# incompatible with a re-built vocabulary.
vocab.extend(sorted(data_vocab.union(basic)))
# String-to-index lookup used by selfies2ints.
vocab_stoi = {token: i for i, token in enumerate(vocab)}


def selfies2ints(s):
    """Translate a SELFIES string into a list of vocabulary indices.

    Tokens not present in ``vocab_stoi`` are encoded as ``np.nan`` so callers
    can detect (and mask out) molecules the model vocabulary cannot represent.
    The "." multi-fragment separator is skipped entirely.
    """
    result = []
    for token in sf.split_selfies(s):
        if token == ".":
            continue  # fragment separator carries no vocabulary index
        # dict.get with a NaN default replaces the original if/else lookup;
        # unknown tokens become NaN rather than raising KeyError.
        result.append(vocab_stoi.get(token, np.nan))
    return result


def ints2selfies(v):
    """Inverse of selfies2ints: map vocabulary indices back to a SELFIES string."""
    tokens = (vocab[idx] for idx in v)
    return "".join(tokens)


# test them out: round-trip the first molecule through both converters.
s = selfies_list[0]
print("selfies:", s)
v = selfies2ints(s)
print("selfies2ints:", v)
so = ints2selfies(v)
# fixed typo in the printed label (was "ints2selfes:")
print("ints2selfies:", so)
assert so == s.replace(
    ".", ""
)  # make sure '.' is removed from Selfies string during assertion
@dataclass
class Config:
    """Hyperparameters for the solubility RNN and its data pipeline."""

    # number of tokens in the SELFIES vocabulary (Embedding input_dim)
    vocab_size: int
    # total number of examples (including un-encodable None entries)
    example_number: int
    # minibatch size for train/val/test datasets
    batch_size: int
    # tf.data shuffle buffer size
    buffer_size: int
    # dimensionality of the learned token embeddings (Embedding output_dim)
    embedding_dim: int
    # number of GRU units
    rnn_units: int
    # width of the dense hidden layer
    hidden_dim: int


config = Config(
    vocab_size=len(vocab),
    example_number=len(selfies_list),
    batch_size=16,
    buffer_size=10000,
    embedding_dim=256,
    hidden_dim=128,
    rnn_units=128,
)
# now get sequences: integer-encode every encodable SELFIES and right-pad with
# zeros (the "[nop]" index) to a common length.
encoded = [selfies2ints(s) for s in selfies_list if s is not None]
padded_seqs = tf.keras.preprocessing.sequence.pad_sequences(encoded, padding="post")

# Now build dataset; the bool(s) mask drops the Solubility rows whose SELFIES
# encoding failed (None), keeping labels aligned with padded_seqs.
data = tf.data.Dataset.from_tensor_slices(
    (padded_seqs, soldata.Solubility.iloc[[bool(s) for s in selfies_list]].values)
)
# now split into val, test, train and batch: first 10% test, next 10% val,
# remainder shuffled/batched/prefetched for training. Note the data is NOT
# shuffled before take/skip, so the split follows dataset order.
N = len(data)
split = int(0.1 * N)
test_data = data.take(split).batch(config.batch_size)
nontest = data.skip(split)
val_data, train_data = nontest.take(split).batch(config.batch_size), nontest.skip(
    split
).shuffle(config.buffer_size).batch(config.batch_size).prefetch(
    tf.data.experimental.AUTOTUNE
)
# Embedding (index 0 = padding, masked) -> GRU -> ReLU dense -> linear head.
model = tf.keras.Sequential(
    [
        # token embedding; mask_zero=True makes downstream layers skip padding
        tf.keras.layers.Embedding(
            input_dim=config.vocab_size,
            output_dim=config.embedding_dim,
            mask_zero=True,
        ),
        # recurrent layer reads the (masked) token sequence
        tf.keras.layers.GRU(config.rnn_units),
        # dense hidden layer
        tf.keras.layers.Dense(config.hidden_dim, activation="relu"),
        # regression output, so no activation
        tf.keras.layers.Dense(1),
    ]
)

model.summary()
model.compile(tf.optimizers.Adam(1e-4), loss="mean_squared_error")
result = model.fit(train_data, validation_data=val_data, epochs=100, verbose=0)
# Training curves.
plt.plot(result.history["loss"], label="training")
plt.plot(result.history["val_loss"], label="validation")
plt.legend()
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.show()

# Collect predictions and labels over the held-out test split.
yhat = []
test_y = []
for x, y in test_data:
    yhat.extend(model(x).numpy().flatten())
    test_y.extend(y.numpy().flatten())
yhat = np.array(yhat)
test_y = np.array(test_y)

# plot test data (parity plot: dotted y=x line plus predictions)
plt.plot(test_y, test_y, ":")
plt.plot(test_y, yhat, ".")
# BUG FIX: the text anchors previously used `y`, the stale loop variable that
# holds only the LAST test batch; anchor on the full label array instead.
plt.text(
    min(test_y) - 7,
    max(test_y) - 2,
    f"correlation = {np.corrcoef(test_y, yhat)[0,1]:.3f}",
)
plt.text(
    min(test_y) - 7,
    max(test_y) - 3,
    f"loss = {np.sqrt(np.mean((test_y - yhat)**2)):.3f}",
)
plt.title("Testing Data")
plt.savefig("rnn-fit.png", dpi=300)
plt.show()

CF explanation:

In the following example, let’s say we would like our molecules to return a solubility value of -3.5. Here we use the MMACE algorithm to create counterfactual explanations. In other words, we would like to see what minimal mutations could be made to our input structure to get the desired solubility.

def predictor_function(smile_list, selfies):
    """Predict solubility for a batch of molecules given as SELFIES.

    Parameters
    ----------
    smile_list : unused; present because exmol calls predictors with
        (smiles, selfies) -- TODO confirm against exmol docs.
    selfies : list of SELFIES strings to score.

    Returns
    -------
    1-D np.ndarray of predicted solubilities, with NaN for any molecule
    containing tokens outside the model vocabulary.
    """
    encoded = [selfies2ints(s) for s in selfies]
    # A NaN anywhere in the encoding marks an out-of-vocabulary token; flag
    # the whole molecule as invalid. The original `sum(e) > 0` test only
    # worked because NaN poisons the sum, and it also mis-flagged all-zero
    # (all-padding) encodings as invalid.
    valid = [np.nan if np.any(np.isnan(e)) else 1.0 for e in encoded]
    encoded = [np.nan_to_num(e, nan=0) for e in encoded]
    padded_seqs = tf.keras.preprocessing.sequence.pad_sequences(encoded, padding="post")
    labels = np.reshape(model.predict(padded_seqs), (-1))
    # elementwise multiply propagates NaN into invalid predictions
    return labels * valid
# Smoke test: "[Nop]" (wrong case) is not in the vocabulary, so the second
# molecule should come back NaN.
predictor_function([], ["[C][C][O]", "[C][C][Nop][O]"])
# STONED sampler settings: 2500 candidates drawn from the basic SELFIES
# alphabet with at most 2 mutations each.
stoned_kwargs = {
    "num_samples": 2500,
    "alphabet": exmol.get_basic_alphabet(),
    "max_mutations": 2,
}
# Sample chemical space around molecule 4 and score every sample with the RNN.
space = exmol.sample_space(
    soldata.SMILES[4], predictor_function, stoned_kwargs=stoned_kwargs, quiet=True
)
# Counterfactual explanations; 0.5 is presumably the prediction-delta
# threshold -- confirm against the exmol rcf_explain signature.
exps = exmol.rcf_explain(space, 0.5, nmols=4)
fkw = {"figsize": (10, 3)}
exmol.plot_cf(exps, figure_kwargs=fkw, mol_size=(450, 400), nrows=1)
plt.savefig("rnn-simple.png", bbox_inches="tight", dpi=180)
# Re-render the figure as SVG and save it alongside the PNG.
svg = exmol.insert_svg(exps, mol_fontsize=16)
with open("rnn-simple.svg", "w") as f:
    f.write(svg)
import os

fkw = {"figsize": (10, 4)}
# NOTE(review): `font` is assigned but never used in this section, and
# "normal" is not a real font family; kept only in case later cells read it.
font = {"family": "normal", "weight": "normal", "size": 22}

# Chemical-space scatter of all sampled molecules, colored by prediction.
exmol.plot_space(space, exps, figure_kwargs=fkw, mol_size=(100, 100), offset=1)
ax = plt.gca()
plt.colorbar(
    ax.get_children()[1],
    ax=[ax],
    label="Solubility [Log M]",
    location="left",
    shrink=0.8,
)
plt.savefig("rnn-space.png", bbox_inches="tight", dpi=180)
svg = exmol.insert_svg(exps, mol_fontsize=16)
# BUG FIX: unlike every other SVG write in this file, this one targets a
# subdirectory; create it first so open() cannot raise FileNotFoundError.
os.makedirs("svg_figs", exist_ok=True)
with open("svg_figs/rnn-space.svg", "w") as f:
    f.write(svg)
# Re-sample the space with exmol's built-in "wide" preset instead of custom
# STONED kwargs, then redraw the space plot.
space = exmol.sample_space(
    soldata.SMILES[4], predictor_function, preset="wide", quiet=True
)
exps = exmol.rcf_explain(space, 0.5)
fkw = {"figsize": (8, 6)}
# NOTE(review): `font` is defined but never used below, and "normal" is not a
# valid matplotlib font family -- candidate for removal.
font = {"family": "normal", "weight": "normal", "size": 22}


exmol.plot_space(space, exps, figure_kwargs=fkw, mol_size=(200, 200), offset=1)
ax = plt.gca()
plt.colorbar(ax.get_children()[1], ax=[ax], location="left", label="Solubility [Log M]")
plt.savefig("rnn-wide.png", bbox_inches="tight", dpi=180)
svg = exmol.insert_svg(exps, mol_fontsize=16)
with open("rnn-space-wide.svg", "w") as f:
    f.write(svg)

Figure showing effect of mutation number and Alphabet

# Effect of mutation count: for each fixed mutation number, sample a space and
# keep the origin (once) plus one "Decrease" counterfactual relabeled with the
# mutation count, so plot_cf can show them side by side.
exps = []
spaces = []
for i in [1, 3, 5]:
    stoned_kwargs = {
        "num_samples": 2500,
        "alphabet": exmol.get_basic_alphabet(),
        "min_mutations": i,
        "max_mutations": i,
    }
    space = exmol.sample_space(
        soldata.SMILES[4], predictor_function, stoned_kwargs=stoned_kwargs, quiet=True
    )
    spaces.append(space)
    e = exmol.rcf_explain(space, nmols=2)
    if len(exps) == 0:
        exps.append(e[0])  # e[0] is the origin molecule; include it only once
    for ei in e:
        # keep the first non-origin explanation whose label says "Decrease"
        if not ei.is_origin and "Decrease" in ei.label:
            ei.label = f"Mutations = {i}"
            exps.append(ei)
            break
fkw = {"figsize": (10, 4)}
exmol.plot_cf(exps, figure_kwargs=fkw, mol_fontsize=26, mol_size=(400, 400), nrows=1)
plt.savefig("rnn-mutations.png", bbox_inches="tight", dpi=180)
svg = exmol.insert_svg(exps, mol_fontsize=16)
with open("rnn-mutations.svg", "w") as f:
    f.write(svg)
# Similarity histograms of each sampled space (index 0, the origin, skipped),
# one panel per mutation setting with a shared y-axis.
fig, axs = plt.subplots(1, 3, figsize=(8, 3), dpi=180, squeeze=True, sharey=True)
for ax, n, sampled in zip(axs, [1, 3, 5], spaces):
    similarities = [e.similarity for e in sampled[1:]]
    ax.hist(similarities, bins=99, edgecolor="none")
    ax.set_title(f"Mutations = {n}")
    ax.set_xlim(0, 1)
plt.tight_layout()
plt.savefig("rnn-mutation-hist.png", bbox_inches="tight", dpi=180)
# Compare three mutation alphabets: exmol's basic set, the tokens observed in
# the training data, and the full semantically-robust SELFIES alphabet.
basic = exmol.get_basic_alphabet()
train = sf.get_alphabet_from_selfies([s for s in selfies_list if s is not None])
wide = sf.get_semantic_robust_alphabet()

alphs = {"Basic": basic, "Training Data": train, "SELFIES": wide}

# As in the mutation experiment: keep the origin once plus one "Decrease"
# counterfactual per alphabet, relabeled for the figure.
exps = []
for l, a in alphs.items():  # NOTE(review): `l` shadows nothing but is E741; consider `label`
    # half the sample count used elsewhere
    stoned_kwargs = {"num_samples": 2500 // 2, "alphabet": a, "max_mutations": 2}
    space = exmol.sample_space(
        soldata.SMILES[4], predictor_function, stoned_kwargs=stoned_kwargs, quiet=True
    )
    e = exmol.rcf_explain(space, nmols=2)
    if len(exps) == 0:
        exps.append(e[0])  # origin molecule, included only once
    for ei in e:
        if not ei.is_origin and "Decrease" in ei.label:
            ei.label = f"Alphabet = {l}"
            exps.append(ei)
            break
fkw = {"figsize": (10, 4)}
exmol.plot_cf(exps, figure_kwargs=fkw, mol_fontsize=26, mol_size=(400, 400), nrows=1)
plt.savefig("rnn-alphabets.png", bbox_inches="tight", dpi=180)
svg = exmol.insert_svg(exps, mol_fontsize=16)