2 changes: 1 addition & 1 deletion src/dxtb/_src/scf/pure/iterator.py
@@ -89,7 +89,7 @@ def scf_pure(
# compute one extra SCF cycle with strong damping.
# Note that this is not required for SCF with full gradient tracking.
# (see https://github.com/grimme-lab/xtbML/issues/124)
-if cfg.scp_mode == labels.SCP_MODE_CHARGE:
+if cfg.scp_mode in [labels.SCP_MODE_CHARGE, labels.SCP_MODE_FOCK]:
mixer = Simple({**cfg.fwd_options, "damp": 1e-4})
q_new = fcn(q_converged, data, cfg, interactions)
q_converged = mixer.iter(q_new, q_converged)
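The hunk above extends the extra, strongly damped SCF cycle (previously run only when self-consistency is performed over the charges) to the Fock-matrix SCP mode. A minimal usage sketch that exercises this branch, assuming the public Calculator options and API used by the new tests below; the H2 geometry is illustrative only:

import torch

from dxtb import GFN1_XTB, Calculator

# H2 with an approximate bond length (coordinates in Bohr, illustrative only).
numbers = torch.tensor([1, 1])
positions = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.4]], dtype=torch.double)

# "scp_mode": "fock" selects Fock-matrix self-consistency and thus the changed branch;
# "charge" already took this code path before, while "potential" remains unaffected.
calc = Calculator(
    numbers,
    GFN1_XTB,
    dtype=torch.double,
    opts={"scf_mode": "implicit", "scp_mode": "fock"},
)
energy = calc.get_energy(positions)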
16 changes: 16 additions & 0 deletions test/test_fock/__init__.py
@@ -0,0 +1,16 @@
# This file is part of dxtb.
#
# SPDX-Identifier: Apache-2.0
# Copyright (C) 2024 Grimme Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
39 changes: 39 additions & 0 deletions test/test_fock/samples.py
@@ -0,0 +1,39 @@
# This file is part of dxtb.
#
# SPDX-Identifier: Apache-2.0
# Copyright (C) 2024 Grimme Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Molecules for testing the Hamiltonian. Reference values are stored in an npz file.
"""

from __future__ import annotations

from tad_mctc.data.molecules import mols

from dxtb._src.typing import Molecule

extra: dict[str, Molecule] = {
"H2_nocn": {
"numbers": mols["H2"]["numbers"],
"positions": mols["H2"]["positions"],
},
"SiH4_nocn": {
"numbers": mols["SiH4"]["numbers"],
"positions": mols["SiH4"]["positions"],
},
}


samples: dict[str, Molecule] = {**mols, **extra}
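For orientation, entries of the merged dictionary are looked up by name in the tests (inside the test package the import is from .samples import samples); a minimal access sketch using the stock tad-mctc molecules, whose geometries the *_nocn entries above reuse:

import torch

from tad_mctc.data.molecules import mols

# "numbers" and "positions" are stored as torch tensors and can be moved to the
# desired device/dtype, mirroring the access pattern in the test below.
sample = mols["SiH4"]
numbers = sample["numbers"]
positions = sample["positions"].to(torch.double)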
82 changes: 82 additions & 0 deletions test/test_fock/test_grad_pos.py
@@ -0,0 +1,82 @@
# This file is part of dxtb.
#
# SPDX-Identifier: Apache-2.0
# Copyright (C) 2024 Grimme Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing the Fock matrix gradient with respect to positions (autodiff).
"""

from __future__ import annotations

import pytest
import torch
from tad_mctc.autograd import dgradcheck

from dxtb import GFN1_XTB as par
from dxtb import Calculator, OutputHandler, labels

[Code scanning / CodeQL, note (test): Import of 'labels' is not used.]
from dxtb._src.typing import DD, Callable, Tensor
from dxtb.config import ConfigCache

from ..conftest import DEVICE
from .samples import samples

sample_list = ["H2", "HHe", "LiH", "S2", "H2O", "SiH4"]

# remove HHe as it does not pass the numerical gradient check
sample_list = [s for s in sample_list if s not in ["HHe"]]

tol = 5e-4 # increased tolerance


def gradchecker(
dtype: torch.dtype, name: str, scp_mode: str
) -> tuple[Callable[[Tensor], Tensor], Tensor]:
"""Prepare gradient check from `torch.autograd`."""
dd: DD = {"dtype": dtype, "device": DEVICE}

sample = samples[name]
numbers = sample["numbers"].to(DEVICE)
positions = sample["positions"].to(**dd)

opts = {
"scf_mode": "implicit",
"scp_mode": scp_mode,
}

calc = Calculator(numbers, par, **dd, opts=opts)
calc.opts.cache = ConfigCache(enabled=False, fock=True)
OutputHandler.verbosity = 0

# variables to be differentiated
pos = positions.clone().requires_grad_(True)

def func(p: Tensor) -> Tensor:
_ = calc.get_energy(p) # triggers Fock matrix computation
return calc.cache["fock"]

return func, pos


@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("scp_mode", ["charge", "potential", "fock"])
def test_grad_fock(dtype: torch.dtype, name: str, scp_mode: str) -> None:
"""
Check analytical gradient of Fock matrix against numerical
gradient from `torch.autograd.gradcheck`.
"""
func, diffvars = gradchecker(dtype, name, scp_mode)
assert dgradcheck(func, diffvars, atol=tol)
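dgradcheck comes from tad-mctc; a rough standalone equivalent of a single parametrization, under the assumption that it is a thin convenience wrapper around torch.autograd.gradcheck:

import torch

# gradchecker is the helper defined in the test module above.
func, pos = gradchecker(torch.double, "H2", "fock")
assert torch.autograd.gradcheck(func, (pos,), atol=5e-4)

Running only these checks would typically look like pytest -m grad test/test_fock, assuming the grad marker is registered in the project's pytest configuration.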