# Notebook cell: load the reference figures on parity and (pseudo)vectors.
# NOTE(review): in a notebook, only the last expression of a cell is rendered,
# so the first Image call here produces no visible output — confirm cell split.
from IPython.display import Image
Image(filename="parity.png", width = 600, height = 300)
Image(filename="pv.png", width = 650, height = 300)
import torch
from e3nn import o3
# Irreducible representations of O(3): the digit is the rotation order l,
# the letter is the parity under inversion (e = even, o = odd).
scalar_irrep = o3.Irrep('0e')         # invariant under rotation and inversion
pseudo_scalar_irrep = o3.Irrep('0o')  # rotation-invariant, sign flips under inversion
vector_irrep = o3.Irrep('1o')         # ordinary (polar) vector
pseudo_vector_irrep = o3.Irrep('1e')  # pseudo (axial) vector, e.g. angular momentum
scalar_irrep
vector_irrep
# A direct sum of irreps: one scalar and two vectors (dimension 1 + 2*3 = 7).
irreps = o3.Irreps('1x0e + 2x1o')
irreps
irreps.lmax
# NOTE(review): iterating an Irreps yields (multiplicity, irrep) pairs in
# current e3nn, so `l` here is actually the multiplicity and `p` the irrep —
# the names are misleading; verify against the e3nn version in use.
[(l, p) for l, p in irreps]
# Two random feature vectors with known transformation behavior.
irreps_x = o3.Irreps('2x1o')     # two vectors -> dim 6
irreps_y = o3.Irreps('0e + 1e')  # a scalar and a pseudo-vector -> dim 4
x = irreps_x.randn(-1)
y = irreps_y.randn(-1)
irreps_x.dim, irreps_y.dim
# Wigner D-matrices: how each feature vector transforms under a random rotation R.
R = o3.rand_matrix()
D_x = irreps_x.D_from_matrix(R)
D_y = irreps_y.D_from_matrix(R)
# Outer product of the two feature vectors; as a flattened vector it
# transforms with kron(D_x, D_y), visualized below (dense, not block-diagonal).
A = torch.einsum('i,j', x, y)
A
from matplotlib import pyplot as plt
plt.figure(figsize=(15, 15))
plt.imshow(torch.kron(D_x, D_y));
# FullTensorProduct decomposes the tensor product into irreps via a change of
# basis; in that basis the representation matrix D is block-diagonal.
tp = o3.FullTensorProduct(irreps_x, irreps_y)
print(tp)
tp(x, y)
D = tp.irreps_out.D_from_matrix(R)
plt.figure(figsize=(15,15))
plt.imshow(D);
!rm -rf ./results/
!rm -rf ./tutorial_data
!rm -rf ./tutorial-results
import torch
torch.set_default_dtype(torch.float32)
from nequip.utils import Config
config = Config.from_file('./tutorial.yaml')
import pprint
pprint.pprint(config.as_dict())
import logging
from nequip.train.trainer import Trainer
trainer = Trainer(model=None, **dict(config))
!mv benzene*.xyz ./tutorial_data
!rm *.zip
!curl http://quantum-machine.org/gdml/data/npz/benzene_ccsd_t.zip -o outfile.zip
!unzip outfile.zip
!rm -rf __MACOSX outfile outfile.zip
!rm -rf ./tutorial_data
!mkdir tutorial_data
!mv benzene* tutorial_data
!curl http://quantum-machine.org/gdml/data/xyz/benzene_ccsd_t.zip -o ourfile.zip
!unzip outfile.zip
!curl http://quantum-machine.org/gdml/data/xyz/benzene_ccsd_t.zip -o outfile.zip
!unzip outfile.zip
!mv benzene*.xyz ./tutorial_data
!rm *.zip
!ls tutorial_data
# Load the first training frame with ASE and visualize it inline.
from ase.io import read
atoms = read('./tutorial_data/benzene_ccsd_t-train.xyz', index=0)
from ase.visualize import view
view(atoms, viewer='x3d')
# Build the nequip dataset described by the YAML config and hand it to the trainer.
from nequip.utils import dataset_from_config
dataset = dataset_from_config(config)
logging.info(f"Successfully loaded the data set of type {dataset}...")
trainer.set_dataset(dataset)
from nequip.data import AtomicDataDict
# Compute per-field statistics over the training split, used later to
# normalize the model's outputs:
#  - RMS of the forces ("rms") -> output scale,
#  - mean/std of the total energies ("mean_std") -> output shift,
#  - atomic numbers present and their counts ("count").
(
(forces_std,),
(energies_mean, energies_std),
(allowed_species, Z_count),
) = trainer.dataset_train.statistics(
fields=[
AtomicDataDict.FORCE_KEY,
AtomicDataDict.TOTAL_ENERGY_KEY,
AtomicDataDict.ATOMIC_NUMBERS_KEY,
],
modes=["rms", "mean_std", "count"],
)
from nequip.models import ForceModel
# Record the species present in the dataset, then build nequip's packaged
# force model straight from the config (the hand-built version follows below).
config.update(dict(allowed_species=allowed_species))
force_model_ = ForceModel(**dict(config))
# Imports needed to hand-build the same architecture from nequip components.
import logging
from nequip.data import AtomicDataDict
from nequip.nn import (
GraphModuleMixin,
SequentialGraphNetwork,
AtomwiseLinear,
AtomwiseReduce,
ForceOutput,
PerSpeciesScaleShift,
ConvNetLayer,
)
from nequip.nn.embedding import (
OneHotAtomEncoding,
RadialBasisEdgeEncoding,
SphericalHarmonicEdgeAttrs,
)
def EnergyModel(**shared_params) -> SequentialGraphNetwork:
    """Construct the base default energy model architecture.

    Configuration is consumed via keyword arguments; for minimal and full
    option listings, see ``minimal.yaml`` and ``example.yaml``.
    """
    logging.debug("Start building the network model")

    n_conv = shared_params.pop("num_layers", 3)
    use_species_shift = shared_params.pop("PerSpeciesScaleShift_enable", False)

    # Encoding and embedding stages; dict insertion order defines the order
    # in which the SequentialGraphNetwork executes the modules.
    net_layers = {
        "one_hot": OneHotAtomEncoding,
        "spharm_edges": SphericalHarmonicEdgeAttrs,
        "radial_basis": RadialBasisEdgeEncoding,
        "chemical_embedding": AtomwiseLinear,
    }

    # Interaction (convnet) blocks, one entry per layer.
    net_layers.update({f"layer{idx}_convnet": ConvNetLayer for idx in range(n_conv)})

    # Output head: hidden linear layer, then projection to one scalar per atom.
    net_layers["conv_to_output_hidden"] = AtomwiseLinear
    net_layers["output_hidden_to_scalar"] = (
        AtomwiseLinear,
        dict(irreps_out="1x0e", out_field=AtomicDataDict.PER_ATOM_ENERGY_KEY),
    )

    # Optional per-species rescaling of the per-atom energies.
    if use_species_shift:
        net_layers["per_species_scale_shift"] = (
            PerSpeciesScaleShift,
            dict(
                field=AtomicDataDict.PER_ATOM_ENERGY_KEY,
                out_field=AtomicDataDict.PER_ATOM_ENERGY_KEY,
            ),
        )

    # Sum the per-atom energies into the total energy.
    net_layers["total_energy_sum"] = (
        AtomwiseReduce,
        dict(
            reduce="sum",
            field=AtomicDataDict.PER_ATOM_ENERGY_KEY,
            out_field=AtomicDataDict.TOTAL_ENERGY_KEY,
        ),
    )

    return SequentialGraphNetwork.from_parameters(
        shared_params=shared_params,
        layers=net_layers,
    )
def ForceModel(**shared_params) -> GraphModuleMixin:
    """Construct the base default energy-and-force model architecture.

    Convenience wrapper equivalent to building ``EnergyModel`` from the shared
    parameters and passing it to ``nequip.nn.ForceOutput``, which obtains
    forces by differentiating the predicted energy. For minimal and full
    configuration option listings, see ``minimal.yaml`` and ``example.yaml``.
    """
    return ForceOutput(energy_model=EnergyModel(**shared_params))
# Build the hand-assembled force model and wrap it so outputs are rescaled
# back to dataset units: energies/forces scaled by the force RMS, energies
# shifted by the dataset mean energy.
core_model = ForceModel(**dict(config))
logging.info("Successfully built the network...")
from nequip.nn import RescaleOutput
final_model = RescaleOutput(
model=core_model,
scale_keys=[AtomicDataDict.TOTAL_ENERGY_KEY, AtomicDataDict.FORCE_KEY],
scale_by=forces_std,
shift_keys=AtomicDataDict.TOTAL_ENERGY_KEY,
shift_by=energies_mean,
)
# Attach the model and run training.
trainer.model = final_model
trainer.train()
import numpy as np

# Pull the last frame of the raw benzene dataset for a qualitative check.
benzene_data = np.load(config.dataset_file_name)
r = benzene_data['R'][-1]        # atomic positions of the final frame
forces = benzene_data['F'][-1]   # reference forces of the final frame

final_model.eval()

from nequip.data import AtomicData

# Build a graph from raw points; atomic numbers must be int64.
# Bug fix: the original round-tripped z through float32 and an extra
# torch.Tensor copy -- convert once, directly to int64.
data = AtomicData.from_points(
    pos=r,
    r_max=config['r_max'],
    **{
        AtomicDataDict.ATOMIC_NUMBERS_KEY: torch.from_numpy(
            benzene_data['z'].astype(np.int64)
        )
    }
)

# Bug fix: the predictions are compared against the raw reference forces in
# the plot below, so they must come from the rescaled `final_model` (the one
# put in eval mode above), not the normalized `core_model` the original used.
pred = final_model(AtomicData.to_AtomicDataDict(data))['forces']
from matplotlib import pyplot as plt

# Overlay predicted (black arrows) and reference (red arrows) forces on the
# atom positions, projected onto the x-y plane.
plt.figure(figsize=(12, 12))
plt.plot(
    r[:, 0],
    r[:, 1],
    '.k',
    markersize=10,
)
# Fix: the original passed `norm=None` to both quiver calls; that is the
# default and has no effect when no color array is given, so it is dropped.
plt.quiver(
    r[:, 0],
    r[:, 1],
    pred[:, 0].detach().numpy(),
    pred[:, 1].detach().numpy(),
)
plt.quiver(
    r[:, 0],
    r[:, 1],
    forces[:, 0],
    forces[:, 1],
    color='red',
)
plt.legend(['Positions', 'Predicted', 'True'], prop={'size': 20})
plt.show()