Skip to content

Commit

Permalink
Merge branch 'yves' of https://github.com/GT4SD/gt4sd-core into yves
Browse files Browse the repository at this point in the history
  • Loading branch information
yvesnana committed Mar 7, 2024
2 parents 3913c8f + 3641f39 commit f0c8fe9
Show file tree
Hide file tree
Showing 10 changed files with 206 additions and 91 deletions.
107 changes: 16 additions & 91 deletions examples/enzeptional/README.md
Original file line number Diff line number Diff line change
@@ -1,97 +1,22 @@
# Enzyme Optimization Experiment
# Enzyme Optimization in Biocatalytic Reactions

## Description
This script performs an optimization experiment for enzyme sequences using different mutation strategies.
This repository provides an example of how to run the framework for the optimization of enzymes within the context of biocatalytic reactions.

## Import modules
```python
import logging
import pandas as pd
from gt4sd.frameworks.enzeptional.processing import HFandTAPEModelUtility
from gt4sd.frameworks.enzeptional.core import SequenceMutator, EnzymeOptimizer
from gt4sd.configuration import sync_algorithm_with_s3
from gt4sd.configuration import GT4SDConfiguration
configuration = GT4SDConfiguration.get_instance()
```
## Prerequisites

## Load datasets and scorers
```python
sync_algorithm_with_s3("proteins/enzeptional/scorers", module="properties")
```
Feasibility scorer path
```python
scorer_path = f"{configuration.gt4sd_local_cache_path}/properties/proteins/enzeptional/scorers/feasibility/model.pkl"
```
## Set embedding model/tokenizer paths
```python
language_model_path = "facebook/esm2_t33_650M_UR50D"
tokenizer_path = "facebook/esm2_t33_650M_UR50D"
unmasking_model_path = "facebook/esm2_t33_650M_UR50D"
chem_model_path = "seyonec/ChemBERTa-zinc-base-v1"
chem_tokenizer_path = "seyonec/ChemBERTa-zinc-base-v1"
```
## Load protein embedding model
```python
protein_model = HFandTAPEModelUtility(
embedding_model_path=language_model_path, tokenizer_path=tokenizer_path
)
```
## Create mutation config
```python
mutation_config = {
"type": "language-modeling",
"embedding_model_path": language_model_path,
"tokenizer_path": tokenizer_path,
"unmasking_model_path": unmasking_model_path,
}
```
## Set key parameters
```python
intervals = [(5, 10), (20, 25)]
batch_size = 5
top_k = 3
substrate_smiles = "NC1=CC=C(N)C=C1"
product_smiles = "CNC1=CC=C(NC(=O)C2=CC=C(C=C2)C(C)=O)C=C1"
Before initiating the enzyme optimization process, execute the following command in your terminal to activate the environment:

sample_sequence = "MSKLLMIGTGPVAIDQFLTRYEASCQAYKDMHQDQQLSSQFNTNLFEGDKALVTKFLEINRTLS"
```console
conda activate gt4sd
```
## Load mutator
```python
mutator = SequenceMutator(sequence=sample_sequence, mutation_config=mutation_config)
```
## Set Optimizer
```python
optimizer = EnzymeOptimizer(
sequence=sample_sequence,
protein_model=protein_model,
substrate_smiles=substrate_smiles,
product_smiles=product_smiles,
chem_model_path=chem_model_path,
chem_tokenizer_path=chem_tokenizer_path,
scorer_filepath=scorer_path,
mutator=mutator,
intervals=intervals,
batch_size=batch_size,
top_k=top_k,
selection_ratio=0.25,
perform_crossover=True,
crossover_type="single_point",
concat_order=["substrate", "sequence", "product"],
)
```
## Define optimization parameters
```python
num_iterations = 3
num_sequences = 5
num_mutations = 5
time_budget = 3600
```
## Optimize
```python
optimized_sequences, iteration_info = optimizer.optimize(
num_iterations=num_iterations,
num_sequences=num_sequences,
num_mutations=num_mutations,
time_budget=time_budget,
)

## Citation

```bibtex
@inproceedings{teukam2023enzyme,
title={Enzyme optimization via a generative language modeling-based evolutionary algorithm},
author={Teukam, Yves Gaetan Nana and Grisoni, Francesca and Manica, Matteo and Zipoli, Federico and Laino, Teodoro},
booktitle={American Chemical Society (ACS) Spring Meeting},
year={2023}
}
```
106 changes: 106 additions & 0 deletions examples/enzeptional/data.csv

Large diffs are not rendered by default.

84 changes: 84 additions & 0 deletions examples/enzeptional/example_enzeptional.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
import ast
import logging

import pandas as pd

from gt4sd.configuration import GT4SDConfiguration, sync_algorithm_with_s3
from gt4sd.frameworks.enzeptional.core import SequenceMutator, EnzymeOptimizer
from gt4sd.frameworks.enzeptional.processing import HFandTAPEModelUtility


def initialize_environment():
    """Sync the enzeptional scorers from GT4SD S3 and return the scorer path.

    Returns:
        Local filesystem path to the feasibility scorer pickle inside the
        GT4SD cache directory.
    """
    # NOTE: to optimize kcat values instead of feasibility, point the returned
    # path at the kcat scorer model; the scaler that lives next to the
    # `scorer.pkl` in that directory is required for correct model performance.
    config = GT4SDConfiguration.get_instance()
    sync_algorithm_with_s3("proteins/enzeptional/scorers", module="properties")
    cache_root = config.gt4sd_local_cache_path
    return (
        f"{cache_root}/properties/proteins/enzeptional/scorers/feasibility/model.pkl"
    )


def load_experiment_parameters(filepath="data.csv"):
    """Load experiment parameters from a CSV file.

    Args:
        filepath: path to the CSV file. Defaults to ``data.csv`` in the
            current working directory, preserving the original behavior.

    Returns:
        Tuple ``(substrate_smiles, product_smiles, sequence, intervals)``
        taken from the second row (``iloc[1]``) of the file; ``intervals``
        is parsed from its string representation into a Python list.
    """
    df = pd.read_csv(filepath).iloc[1]
    # ast.literal_eval safely parses the interval literal (e.g. "[(5, 10)]");
    # eval() here would execute arbitrary code embedded in the data file.
    return (
        df["substrates"],
        df["products"],
        df["sequences"],
        ast.literal_eval(df["intervals"]),
    )


def setup_optimizer(
    substrate_smiles, product_smiles, sample_sequence, intervals, scorer_path
):
    """Build and return a fully configured ``EnzymeOptimizer``.

    Args:
        substrate_smiles: SMILES string of the substrate.
        product_smiles: SMILES string of the product.
        sample_sequence: protein sequence to optimize.
        intervals: regions of the sequence eligible for mutation.
        scorer_path: filesystem path to the scorer model pickle.

    Returns:
        An ``EnzymeOptimizer`` wired with the protein embedding model, the
        chemistry model paths, and a language-model-based sequence mutator.
    """
    protein_paths = "facebook/esm2_t33_650M_UR50D"
    chem_paths = "seyonec/ChemBERTa-zinc-base-v1"

    # Same HF checkpoint serves as embedding model, tokenizer and unmasker.
    protein_model = HFandTAPEModelUtility(
        embedding_model_path=protein_paths, tokenizer_path=protein_paths
    )
    mutator = SequenceMutator(
        sequence=sample_sequence,
        mutation_config={
            "type": "language-modeling",
            "embedding_model_path": protein_paths,
            "tokenizer_path": protein_paths,
            "unmasking_model_path": protein_paths,
        },
    )
    return EnzymeOptimizer(
        sequence=sample_sequence,
        protein_model=protein_model,
        substrate_smiles=substrate_smiles,
        product_smiles=product_smiles,
        chem_model_path=chem_paths,
        chem_tokenizer_path=chem_paths,
        scorer_filepath=scorer_path,
        mutator=mutator,
        intervals=intervals,
        batch_size=5,
        top_k=3,
        selection_ratio=0.25,
        perform_crossover=True,
        crossover_type="single_point",
        concat_order=["substrate", "sequence", "product"],
    )


def optimize_sequences(optimizer):
    """Run the optimization loop with the example's fixed hyperparameters.

    Args:
        optimizer: a configured ``EnzymeOptimizer`` instance.

    Returns:
        Whatever ``optimizer.optimize`` returns for this configuration.
    """
    hyperparameters = {
        "num_iterations": 3,
        "num_sequences": 5,
        "num_mutations": 5,
        "time_budget": 3600,
    }
    return optimizer.optimize(**hyperparameters)


def main():
    """Entry point: set up logging, prepare components, run the optimization."""
    logging.basicConfig(level=logging.INFO)
    scorer_path = initialize_environment()
    substrate, product, sequence, intervals = load_experiment_parameters()
    enzyme_optimizer = setup_optimizer(
        substrate, product, sequence, intervals, scorer_path
    )
    # Results are unpacked but unused here; the example only logs completion.
    optimized_sequences, iteration_info = optimize_sequences(enzyme_optimizer)
    logging.info("Optimization completed.")


# Run the example only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file removed src/gt4sd/frameworks/enzeptional/tests/scorer.pkl
Binary file not shown.

0 comments on commit f0c8fe9

Please sign in to comment.