Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

WIP: Add metatensor #4970

Open
wants to merge 11 commits into
base: python
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 42 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ option(ESPRESSO_BUILD_TESTS "Enable tests" ON)
option(ESPRESSO_BUILD_WITH_SCAFACOS "Build with ScaFaCoS support" OFF)
option(ESPRESSO_BUILD_WITH_STOKESIAN_DYNAMICS "Build with Stokesian Dynamics"
OFF)
option(ESPRESSO_BUILD_WITH_METATENSOR "Build with Metatensor support" OFF)
option(ESPRESSO_BUILD_WITH_WALBERLA
"Build with waLBerla lattice-Boltzmann support" OFF)
option(ESPRESSO_BUILD_WITH_WALBERLA_AVX
Expand Down Expand Up @@ -595,6 +596,47 @@ if(ESPRESSO_BUILD_BENCHMARKS)
add_subdirectory(maintainer/benchmarks)
endif()

#
# Metatensor
#

if(ESPRESSO_BUILD_WITH_METATENSOR)
  # Bring the `torch` target in scope to allow evaluation of cmake generator
  # expressions coming from `metatensor_torch`.
  find_package(Torch REQUIRED)
  # Torch exports its mandatory compile flags through TORCH_CXX_FLAGS.
  # TODO(review): prefer attaching these via target_compile_options() on the
  # consuming targets instead of mutating the global CMAKE_CXX_FLAGS string.
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
  # The user explicitly enabled the feature, so a missing dependency must be
  # a hard configure-time error rather than a silent link failure later on.
  find_package(metatensor REQUIRED)
  find_package(metatensor_torch REQUIRED)
endif()

#
# waLBerla
#
Expand Down
2 changes: 2 additions & 0 deletions cmake/espresso_cmake_config.cmakein
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@

#cmakedefine ESPRESSO_BUILD_WITH_STOKESIAN_DYNAMICS

#cmakedefine ESPRESSO_BUILD_WITH_METATENSOR

#cmakedefine ESPRESSO_BUILD_WITH_WALBERLA

#cmakedefine ESPRESSO_BUILD_WITH_WALBERLA_FFT
Expand Down
Binary file added lennard-jones.pt
Binary file not shown.
1 change: 1 addition & 0 deletions src/config/features.def
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,7 @@ HDF5 external
SCAFACOS external
GSL external
STOKESIAN_DYNAMICS external
METATENSOR external
WALBERLA external
WALBERLA_FFT external
VALGRIND external
Expand Down
6 changes: 6 additions & 0 deletions src/core/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,11 @@ if(ESPRESSO_BUILD_WITH_CUDA)
install(TARGETS espresso_cuda
LIBRARY DESTINATION ${ESPRESSO_INSTALL_PYTHON}/espressomd)
endif()
if(ESPRESSO_BUILD_WITH_METATENSOR)
  # Propagate Torch and metatensor usage requirements to every consumer of
  # the core library.
  target_link_libraries(espresso_core PUBLIC "${TORCH_LIBRARIES}"
                                             metatensor::shared
                                             metatensor_torch)
endif()

install(TARGETS espresso_core
LIBRARY DESTINATION ${ESPRESSO_INSTALL_PYTHON}/espressomd)
Expand Down Expand Up @@ -111,6 +116,7 @@ add_subdirectory(immersed_boundary)
add_subdirectory(integrators)
add_subdirectory(io)
add_subdirectory(lb)
add_subdirectory(ml_metatensor)
add_subdirectory(magnetostatics)
add_subdirectory(nonbonded_interactions)
add_subdirectory(object-in-fluid)
Expand Down
21 changes: 21 additions & 0 deletions src/core/ml_metatensor/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
#
# Copyright (C) 2018-2022 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

# Compilation stub that pulls in the torch/metatensor headers; replace with
# the real translation units (e.g. load_model.cpp) as they land.
target_sources(espresso_core PRIVATE stub.cpp)
71 changes: 71 additions & 0 deletions src/core/ml_metatensor/add_neighbor_list.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
#pragma once

#include "metatensor/torch/atomistic/system.hpp"
#include "utils/Vector.hpp"

// std headers for the types used below; previously only <variant> was
// included, relying on transitive includes.
#include <array>
#include <cstdint>
#include <stdexcept>
#include <string>
#include <variant>
#include <vector>

/** Minimal description of one particle pair coming from the engine's
 *  neighbor search: both particle ids and the distance vector between them.
 */
struct PairInfo {
  int part_id_1;
  int part_id_2;
  Utils::Vector3d distance;
};

// One neighbor-list sample row: (first_atom, second_atom, cell_shift_a/b/c).
using Sample = std::array<int32_t, 5>;
// Distance vectors in either precision, chosen to match the model dtype.
using Distances = std::variant<std::vector<std::array<double, 3>>,
                               std::vector<std::array<float, 3>>>;

/** Build a metatensor neighbor-list block from engine pairs.
 *
 * @param system  metatensor system; its positions define dtype and device.
 * @param pairs   iterable of PairInfo-like objects exposing `part_id_1`,
 *                `part_id_2` and an indexable 3d `distance`.
 * @return a TensorBlock with samples (first_atom, second_atom, cell shifts)
 *         and the distance vectors as values.
 * @throws std::runtime_error for dtypes other than float32/float64.
 */
template <typename PairIterable>
metatensor_torch::TorchTensorBlock
neighbor_list_from_pairs(const metatensor_torch::System &system,
                         const PairIterable &pairs) {
  auto dtype = system->positions().scalar_type();
  auto device = system->positions().device();
  std::vector<Sample> samples;
  Distances distances;

  if (dtype == torch::kFloat64) {
    distances = {std::vector<std::array<double, 3>>()};
  } else if (dtype == torch::kFloat32) {
    distances = {std::vector<std::array<float, 3>>()};
  } else {
    throw std::runtime_error("Unsupported floating point data type");
  }

  for (auto const &pair : pairs) {
    // std::array has no 5-argument constructor, so emplace_back(a, b, 0, 0, 0)
    // does not compile: use aggregate initialization. Field names follow the
    // PairInfo declaration above (the original used pair.particle_id_1).
    // Cell shifts are zero, assuming minimum-image distance vectors — TODO
    // confirm for pairs crossing periodic boundaries.
    samples.push_back({static_cast<int32_t>(pair.part_id_1),
                       static_cast<int32_t>(pair.part_id_2), 0, 0, 0});
    std::visit(
        [&pair](auto &vec) {
          using array_type =
              typename std::remove_reference_t<decltype(vec)>::value_type;
          using scalar_type = typename array_type::value_type;
          // convert the engine's Vector3d explicitly to the active precision
          vec.push_back(array_type{static_cast<scalar_type>(pair.distance[0]),
                                   static_cast<scalar_type>(pair.distance[1]),
                                   static_cast<scalar_type>(pair.distance[2])});
        },
        distances);
  }

  auto n_pairs = static_cast<int64_t>(samples.size());

  auto samples_tensor = torch::from_blob(
      samples.data(), {n_pairs, 5},
      torch::TensorOptions().dtype(torch::kInt32).device(torch::kCPU));

  // Pass the tensor (not the raw std::vector as the original did) and clone()
  // it: the from_blob tensor aliases the local `samples` vector, which dies
  // when this function returns.
  auto samples_ptr = torch::make_intrusive<metatensor_torch::LabelsHolder>(
      std::vector<std::string>{"first_atom", "second_atom", "cell_shift_a",
                               "cell_shift_b", "cell_shift_c"},
      samples_tensor.clone());

  // The visit lambda must have a single return type; the original returned
  // double* or float* depending on the alternative, which does not compile.
  auto distances_vectors = torch::from_blob(
      std::visit([](auto &vec) -> void * { return vec.data(); }, distances),
      {n_pairs, 3, 1}, torch::TensorOptions().dtype(dtype).device(torch::kCPU));

  auto neighbors = torch::make_intrusive<metatensor_torch::TensorBlockHolder>(
      // clone() so the block owns its values instead of aliasing the local
      // `distances` storage (.to() is a no-op when dtype/device already match)
      distances_vectors.clone().to(dtype).to(device), samples_ptr->to(device),
      std::vector<metatensor_torch::TorchLabels>{
          metatensor_torch::LabelsHolder::create({"xyz"}, {{0}, {1}, {2}})
              ->to(device),
      },
      metatensor_torch::LabelsHolder::create({"distance"}, {{0}})->to(device));

  return neighbors;
}

/** Attach a pre-built neighbor list to a metatensor system.
 *
 * Registers the block with autograd first, then stores it on the system —
 * presumably so gradients of the distance vectors flow back to the system
 * positions (NOTE(review): confirm the required call order against the
 * metatensor-torch documentation).
 *
 * @param system            system the neighbor list belongs to
 * @param neighbors         block produced by neighbor_list_from_pairs()
 * @param options           cutoff/full-list options this list was built for
 * @param check_consistency enable metatensor's internal consistency checks
 */
void add_neighbor_list_to_system(
    metatensor_torch::System &system,
    const metatensor_torch::TorchTensorBlock &neighbors,
    const metatensor_torch::NeighborListOptions &options,
    bool check_consistency) {
  metatensor_torch::register_autograd_neighbors(system, neighbors,
                                                check_consistency);
  system->add_neighbor_list(options, neighbors);
}
82 changes: 82 additions & 0 deletions src/core/ml_metatensor/compute.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
#pragma once

#include "metatensor/torch/atomistic/system.hpp"
#include <metatensor/torch/atomistic/model.hpp>
#include <metatensor/torch/tensor.hpp>

#include <cstdint>

/** Evaluate a metatensor atomistic model and return its "energy" output.
 *
 * NOTE(review): the original referenced an undefined `model`, discarded the
 * result of forward() and had no return statement; the model is now an
 * explicit parameter (the function could not previously compile, so no
 * callers can be broken).
 *
 * @param model              TorchScript atomistic model to evaluate
 * @param system             system to compute on
 * @param n_particles        number of locally-owned particles to select
 * @param evaluation_options options forwarded to the model
 * @param dtype              kept for interface stability (currently unused)
 * @param device             device the selection labels are moved to
 * @param check_consistency  enable metatensor's internal checks
 * @return the model's "energy" TensorMap
 */
metatensor_torch::TorchTensorMap
run_model(torch::jit::Module &model, metatensor_torch::System &system,
          int64_t n_particles,
          const metatensor_torch::ModelEvaluationOptions evaluation_options,
          torch::Dtype dtype, torch::Device device, bool check_consistency) {

  // only run the calculation for atoms actually in the current domain
  auto options = torch::TensorOptions().dtype(torch::kInt32);
  auto selected_atoms_values = torch::zeros({n_particles, 2}, options);

  for (int64_t i = 0; i < n_particles; i++) {
    // column 0 is the system index (always 0: single system), column 1 the
    // atom index
    selected_atoms_values[i][1] = i;
  }
  auto selected_atoms = torch::make_intrusive<metatensor_torch::LabelsHolder>(
      std::vector<std::string>{"system", "atom"}, selected_atoms_values);
  evaluation_options->set_selected_atoms(selected_atoms->to(device));

  auto result_ivalue =
      model.forward({std::vector<metatensor_torch::System>{system},
                     evaluation_options, check_consistency});

  auto result = result_ivalue.toGenericDict();
  auto energy =
      result.at("energy").toCustomClass<metatensor_torch::TensorMapHolder>();
  return energy;
}

/** Extract the total energy (as a double) from a model's energy TensorMap.
 *
 * @param energy             energy output of the model
 * @param energy_is_per_atom true when the model returns one energy per atom
 *                           (samples "system"/"atom"); they are summed here
 * @return total energy on the CPU in float64
 */
double get_energy(metatensor_torch::TensorMapHolder &energy,
                  bool energy_is_per_atom) {
  auto energy_block = metatensor_torch::TensorMapHolder::block_by_id(energy, 0);
  auto energy_tensor = energy_block->values();
  auto energy_detached =
      energy_tensor.detach().to(torch::kCPU).to(torch::kFloat64);
  auto energy_samples = energy_block->samples();

  // store the energy returned by the model
  torch::Tensor global_energy;
  if (energy_is_per_atom) {
    assert(energy_samples->size() == 2);
    assert(energy_samples->names()[0] == "system");
    assert(energy_samples->names()[1] == "atom");

    // sum the per-atom contributions; the original asserted the shape of
    // energy_detached (still one row per atom) instead of the summed tensor
    global_energy = energy_detached.sum(0);
    assert(global_energy.sizes() == std::vector<int64_t>({1}));
  } else {
    assert(energy_samples->size() == 1);
    assert(energy_samples->names()[0] == "system");

    assert(energy_detached.sizes() == std::vector<int64_t>({1, 1}));
    global_energy = energy_detached.reshape({1});
  }

  return global_energy.item<double>();
}

/** Compute forces by back-propagating through the energy.
 *
 * NOTE(review): the parameter type was `metatensor::TensorMap`, inconsistent
 * with the sibling get_energy() and with the `metatensor_torch` holder API
 * called below; changed to match get_energy().
 *
 * @param energy energy TensorMap returned by the model (requires grad)
 * @param system system whose positions the gradient is taken w.r.t.
 * @return minus the gradient of the energy w.r.t. positions (i.e. forces)
 */
torch::Tensor get_forces(metatensor_torch::TensorMapHolder &energy,
                         metatensor_torch::System &system) {
  // reset gradients to zero before calling backward
  system->positions().mutable_grad() = torch::Tensor();

  auto energy_block = metatensor_torch::TensorMapHolder::block_by_id(energy, 0);
  auto energy_tensor = energy_block->values();

  // compute forces/virial with backward propagation; the -1 seed yields
  // forces = -dE/dr directly
  energy_tensor.backward(-torch::ones_like(energy_tensor));
  auto forces_tensor = system->positions().grad();
  // assumes positions live on the CPU in float64 — TODO confirm for GPU runs
  assert(forces_tensor.is_cpu() &&
         forces_tensor.scalar_type() == torch::kFloat64);
  return forces_tensor;
}
15 changes: 15 additions & 0 deletions src/core/ml_metatensor/stub.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
// Compilation stub: pulls in the torch and metatensor headers so the build
// system can verify they are usable when METATENSOR is enabled.
#include "config/config.hpp"

#ifdef METATENSOR
// NOTE(review): config.hpp defines a CUDA feature macro which presumably
// collides with identifiers in the torch headers — undefined here as a
// workaround; confirm and document the actual clash.
#undef CUDA
#include <torch/cuda.h>
#include <torch/script.h>
#include <torch/version.h>

// torch/mps.h only exists from Torch 2 onwards
#if TORCH_VERSION_MAJOR >= 2
#include <torch/mps.h>
#endif

#include <metatensor/torch.hpp>
#include <metatensor/torch/atomistic.hpp>
#endif
9 changes: 9 additions & 0 deletions src/core/ml_metatensor/stub.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
// Header counterpart of the compilation stub; exposes the torch headers to
// METATENSOR-enabled translation units.
#pragma once

#include "config/config.hpp"

#ifdef METATENSOR
// NOTE(review): config.hpp defines a CUDA feature macro which presumably
// collides with identifiers in the torch headers — undefined here as a
// workaround; confirm and document the actual clash.
#undef CUDA

#include <torch/cuda.h>
#include <torch/script.h>
#include <torch/version.h>
#endif
48 changes: 48 additions & 0 deletions src/core/ml_metatensor/system.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
#pragma once

#include "ATen/core/TensorBody.h"
#include "metatensor/torch/atomistic/system.hpp"
#include "utils/Vector.hpp"

// std headers for std::ranges::transform, std::back_inserter,
// std::runtime_error and std::vector used below
#include <algorithm>
#include <iterator>
#include <stdexcept>
#include <unordered_map>
#include <vector>

// Maps an engine (ESPResSo) particle type to the 0-based atom type expected
// by the ML model.
using ParticleTypeMap = std::unordered_map<int, int>;

/** Build a metatensor System from engine-side particle data.
 *
 * NOTE(review): the original declared `metatensor_torch::System
 * ::system_from_lmp`, i.e. a member of System that does not exist; this is a
 * free function.
 *
 * @param type_map              engine type -> ML model type mapping
 * @param engine_positions      flat xyz positions, 3 entries per particle
 * @param engine_particle_types per-particle engine types (TODO: should be
 *                              std::vector<int>?)
 * @param box_size              edge lengths of the (assumed orthorhombic) box
 * @param do_virial             kept for interface stability (currently unused)
 * @param dtype                 dtype the model expects
 * @param device                device the model runs on
 * @throws std::runtime_error on inconsistent array lengths
 */
metatensor_torch::System system_from_lmp(
    const ParticleTypeMap &type_map, std::vector<double> &engine_positions,
    const std::vector<double> &engine_particle_types,
    const Utils::Vector3d &box_size, bool do_virial, torch::ScalarType dtype,
    torch::Device device) {
  auto tensor_options =
      torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCPU);
  if (engine_positions.size() % 3 != 0)
    throw std::runtime_error(
        "Position array must have a multiple of 3 elements");
  const auto n_particles = static_cast<int64_t>(engine_positions.size()) / 3;
  if (static_cast<int64_t>(engine_particle_types.size()) != n_particles)
    throw std::runtime_error(
        "Length of position and particle type arrays inconsistent");

  auto positions = torch::from_blob(
      engine_positions.data(), {n_particles, 3},
      // requires_grad=true since we always need gradients w.r.t. positions
      tensor_options.requires_grad(true));

  // translate engine types to the model's atom types
  std::vector<int32_t> particle_types_ml;
  particle_types_ml.reserve(engine_particle_types.size());
  std::ranges::transform(
      engine_particle_types, std::back_inserter(particle_types_ml),
      [&type_map](double engine_type) {
        return type_map.at(static_cast<int>(engine_type));
      });

  // integer dtype (the original used the float64 default over an int buffer,
  // reinterpreting its bytes) and no gradients for discrete types; clone() so
  // the tensor owns its memory once the local vector goes out of scope
  auto particle_types_ml_tensor =
      torch::from_blob(particle_types_ml.data(),
                       {static_cast<int64_t>(particle_types_ml.size())},
                       torch::TensorOptions().dtype(torch::kInt32))
          .clone();

  // diagonal cell: only orthorhombic boxes are supported here
  auto cell = torch::zeros({3, 3}, tensor_options);
  for (int i : {0, 1, 2})
    cell[i][i] = box_size[i];

  // Tensor::to() is not in-place — the original discarded this result
  positions = positions.to(dtype).to(device);
  cell = cell.to(dtype).to(device);

  auto system = torch::make_intrusive<metatensor_torch::SystemHolder>(
      particle_types_ml_tensor.to(device), positions, cell);
  return system;
}