diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c58bace..e5042c6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,11 @@ repos: -- repo: https://github.com/ambv/black - rev: 23.9.1 +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.3.5 hooks: - - id: black - language_version: python3 + # Run the linter. + - id: ruff + # --select I sorts imports + args: [ --fix, --extend-select, I ] + # Run the formatter. + - id: ruff-format diff --git a/.ruff.toml b/.ruff.toml new file mode 100644 index 0000000..ebf8008 --- /dev/null +++ b/.ruff.toml @@ -0,0 +1,78 @@ +# Exclude a variety of commonly ignored directories. +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".git-rewrite", + ".hg", + ".ipynb_checkpoints", + ".mypy_cache", + ".nox", + ".pants.d", + ".pyenv", + ".pytest_cache", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + ".vscode", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "site-packages", + "venv", +] + +# Same as Black. +line-length = 88 +indent-width = 4 + +# Assume Python 3.8 +target-version = "py38" + +[lint] +# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default. +# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or +# McCabe complexity (`C901`) by default. +select = ["E4", "E7", "E9", "F"] +ignore = ["E741"] + +# Allow fix for all enabled rules (when `--fix`) is provided. +fixable = ["ALL"] +unfixable = [] + +# Allow unused variables when underscore-prefixed. +dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" + +[format] +# Like Black, use double quotes for strings. +quote-style = "double" + +# Like Black, indent with spaces, rather than tabs. +indent-style = "space" + +# Like Black, respect magic trailing commas. +skip-magic-trailing-comma = false + +# Like Black, automatically detect the appropriate line ending. +line-ending = "auto" + +# Enable auto-formatting of code examples in docstrings. Markdown, +# reStructuredText code/literal blocks and doctests are all supported. +# +# This is currently disabled by default, but it is planned for this +# to be opt-out in the future. +docstring-code-format = false + +# Set the line length limit used when formatting code snippets in +# docstrings. +# +# This only has an effect when the `docstring-code-format` setting is +# enabled. +docstring-code-line-length = "dynamic" + diff --git a/TB2J/Jdownfolder.py b/TB2J/Jdownfolder.py index c77631e..2dcf67c 100644 --- a/TB2J/Jdownfolder.py +++ b/TB2J/Jdownfolder.py @@ -1,7 +1,9 @@ import os from collections import defaultdict + import numpy as np from ase.dft.kpoints import monkhorst_pack + from TB2J.io_exchange import SpinIO from TB2J.Jtensor import decompose_J_tensor @@ -73,8 +75,8 @@ def downfold_oneq(self, J): class PWFDownfolder: def __init__(self, JR, Rlist, iM, iL, qmesh, atoms=None, iso_only=False, **kwargs): from lawaf.interfaces.magnon.magnon_downfolder import ( - MagnonWrapper, MagnonDownfolder, + MagnonWrapper, ) model = MagnonWrapper(JR, Rlist, atoms) @@ -92,13 +94,13 @@ def __init__(self, JR, Rlist, iM, iL, qmesh, atoms=None, iso_only=False, **kwarg # anchors={(0, 0, 0): (-1, -2, -3, -4)}, # anchors={(0, 0, 0): ()}, # use_proj=True, - enhance_Amn=2.0, + enhance_Amn=0.0, ) params.update(kwargs) wann.set_parameters(**params) print("begin downfold") ewf = wann.downfold() - ewf.save_hr_pickle("downfolded_JR.pickle") + # ewf.save_hr_pickle("downfolded_JR.pickle") # Plot the band structure. 
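+        # Plotting the band fitting below presumably overlays the downfolded
+        # model's magnon bands on the original ones, giving a quick visual
+        # check of the downfolding quality (e.g. after changing enhance_Amn).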
wann.plot_band_fitting( @@ -135,7 +137,7 @@ def __init__( qmesh=[7, 7, 7], iso_only=False, method="pwf", - **kwargs + **kwargs, ): self.exc = SpinIO.load_pickle(path=inpath, fname="TB2J.pickle") @@ -193,7 +195,7 @@ def _downfold(self, **kwargs): qmesh=self.qmesh, atoms=self.atoms, iso_only=self.iso_only, - **kwargs + **kwargs, ) Jd, Rlist = d.get_JR() return Jd, Rlist @@ -274,10 +276,10 @@ def _prepare_index_spin(self): def test(): # pass # inpath = "/home/hexu/projects/NiCl2/vasp_inputs/TB2J_results" - # inpath = "/home/hexu/projects/TB2J_example/CrI3/TB2J_results" - inpath = "/home/hexu/projects/TB2J_projects/NiCl2/TB2J_NiCl/TB2J_results" - fname = os.path.join(inpath, "TB2J.pickle") - p = JDownfolder_pickle( + inpath = "/home/hexu/projects/TB2J_example/CrI3/TB2J_results" + # inpath = "/home/hexu/projects/TB2J_projects/NiCl2/TB2J_NiCl/TB2J_results" + _fname = os.path.join(inpath, "TB2J.pickle") + _p = JDownfolder_pickle( inpath=inpath, metals=["Ni"], ligands=["Cl"], outpath="TB2J_results_downfolded" ) diff --git a/TB2J/MAE.py b/TB2J/MAE.py new file mode 100644 index 0000000..70878ee --- /dev/null +++ b/TB2J/MAE.py @@ -0,0 +1,305 @@ +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import tqdm + +# from TB2J.abacus.abacus_wrapper import AbacusSplitSOCParser +from HamiltonIO.abacus.abacus_wrapper import AbacusSplitSOCParser +from HamiltonIO.model.occupations import Occupations +from HamiltonIO.siesta import SislParser +from scipy.linalg import eigh + +from TB2J.contour import Contour +from TB2J.green import TBGreen + +# from HamiltonIO.model.rotate_spin import rotate_Matrix_from_z_to_axis, rotate_Matrix_from_z_to_sperical +from TB2J.kpoints import monkhorst_pack +from TB2J.mathutils.kR_convert import R_to_k + +# from TB2J.abacus.abacus_wrapper import AbacusWrapper, AbacusParser +from TB2J.mathutils.rotate_spin import ( + rotate_spinor_matrix, +) + + +def get_occupation(evals, kweights, nel, width=0.1): + occ = Occupations(nel=nel, width=width, wk=kweights, nspin=2) + return occ.occupy(evals) + + +def get_density_matrix(evals=None, evecs=None, kweights=None, nel=None, width=0.1): + occ = get_occupation(evals, kweights, nel, width=width) + rho = np.einsum("kib, kb, kjb -> kij", evecs, occ, evecs.conj()) + return rho + + +class MAE: + def __init__( + self, + model, + kmesh=None, + gamma=True, + kpts=None, + kweights=None, + width=0.1, + nel=None, + ): + self.model = model + if nel is not None: + self.model.nel = nel + if kpts is None: + self.kpts = monkhorst_pack(kmesh, gamma_center=gamma) + self.kweights = np.ones(len(self.kpts), dtype=float) / len(self.kpts) + else: + self.kpts = kpts + self.kweights = kweights + self.width = width + + def get_band_energy(self): + evals, evecs = self.model.solve_all(self.kpts) + occ = get_occupation(evals, self.kweights, self.model.nel, width=self.width) + eband = np.sum(evals * occ * self.kweights[:, np.newaxis]) + return eband + + def calc_ref(self): + # calculate the Hk_ref, Sk_ref, Hk_soc_ref, and rho_ref + self.Sk_ref = R_to_k(self.kpts, self.model.Rlist, self.model.SR) + self.Hk_xc_ref = R_to_k(self.kpts, self.model.Rlist, self.model._HR_copy) + self.Hk_soc_ref = R_to_k(self.kpts, self.model.Rlist, self.model.HR_soc) + self.rho_ref = np.zeros( + (len(self.kpts), self.model.nbasis, self.model.nbasis), dtype=complex + ) + + evals = np.zeros((len(self.kpts), self.model.nbasis), dtype=float) + evecs = np.zeros( + (len(self.kpts), self.model.nbasis, self.model.nbasis), dtype=complex + ) + + for ik, kpt in 
enumerate(self.kpts): + # evals, evecs = eigh(self.Hk_xc_ref[ik]+self.Hk_soc_ref[ik], self.Sk_ref[ik]) + evals[ik], evecs[ik] = eigh(self.Hk_xc_ref[ik], self.Sk_ref[ik]) + occ = get_occupation( + evals, self.kweights, self.model.nel, width=self.model.width + ) + # occ = fermi(evals, self.model.efermi, width=self.model.width) + self.rho_ref = np.einsum("kib, kb, kjb -> kij", evecs, occ, evecs.conj()) + + def get_band_energy_vs_angles( + self, + thetas, + phis, + ): + es = [] + # es2 = [] + # e,rho = self.model.get_band_energy(dm=True) + # self.calc_ref() + # thetas = np.linspace(*angle_range, npoints) + nangles = len(thetas) + for i in tqdm.trange(nangles): + theta = thetas[i] + phi = phis[i] + self.model.set_Hsoc_rotation_angle([theta, phi]) + e = self.get_band_energy() + es.append(e) + # es2.append(e2) + return es + + +class MAEGreen(MAE): + def __init__( + self, + model, + kmesh=None, + gamma=True, + kpts=None, + kweights=None, + nel=None, + width=0.1, + **kwargs, + ): + self.model = model + if nel is not None: + self.model.nel = nel + if kpts is None: + self.kpts = monkhorst_pack(kmesh, gamma_center=gamma) + self.kweights = np.ones(len(self.kpts), dtype=float) / len(self.kpts) + else: + self.kpts = kpts + self.kweights = kweights + self.width = width + model.set_so_strength(0.0) + evals, evecs = model.solve_all(self.kpts) + occ = Occupations( + nel=self.model.nel, width=self.width, wk=self.kweights, nspin=2 + ) + # occ.occupy(evals) + efermi = occ.efermi(evals) + print(f"{efermi=}") + self.G = TBGreen(model, kmesh, efermi=efermi, gamma=gamma, **kwargs) + self.emin = -52 + self.emax = 0 + self.nz = 100 + self._prepare_elist() + + def _prepare_elist(self, method="legendre"): + """ + prepare list of energy for integration. + The path has three segments: + emin --1-> emin + 1j*height --2-> emax+1j*height --3-> emax + """ + self.contour = Contour(self.emin, self.emax) + # if method.lower() == "rectangle": + # self.contour.build_path_rectangle( + # height=self.height, nz1=self.nz1, nz2=self.nz2, nz3=self.nz3 + # ) + if method.lower() == "semicircle": + self.contour.build_path_semicircle(npoints=self.nz, endpoint=True) + elif method.lower() == "legendre": + self.contour.build_path_legendre(npoints=self.nz, endpoint=True) + else: + raise ValueError(f"The path cannot be of type {method}.") + + def get_efermi(self): + evals, evecs = self.model.solve_all(self.kpts) + occ = Occupations( + nel=self.model.nel, width=self.model.width, wk=self.kweights, nspin=2 + ) + occ.get_efermi(evals, self.kweights) + + def get_perturbed(self, e, thetas, phis): + G0K = self.G.get_Gk_all(e) + Hsoc_k = self.model.get_Hk_soc(self.kpts) + dE_ang = [] + for theta, phi in zip(thetas, phis): + dE = 0.0 + for i, dHk in enumerate(Hsoc_k): + dHi = rotate_spinor_matrix(dHk, theta, phi) + GdH = G0K[i] @ dHi + # dE += np.trace(GdH @ G0K[i].T.conj() @ dHi) * self.kweights[i] + dE += np.trace(GdH @ GdH) * self.kweights[i] + dE_ang.append(dE) + return np.array(dE_ang) + + def get_band_energy_vs_angles(self, thetas, phis): + es = np.zeros(len(thetas)) + for ie, e in enumerate(tqdm.tqdm(self.contour.path)): + dE_angle = self.get_perturbed(e, thetas, phis) + es += np.imag(dE_angle * self.contour.weights[ie]) + return -es / np.pi / 2 + + +def get_model_energy(model, kmesh, gamma=True): + ham = MAE(model, kmesh, gamma=gamma) + return ham.get_band_energy() + + +def abacus_get_MAE( + path_nosoc, + path_soc, + kmesh, + thetas, + phis, + gamma=True, + outfile="MAE.txt", + nel=None, + width=0.1, +): + """Get MAE from Abacus with magnetic 
force theorem. Two calculations are needed. First we do an calculation with SOC but the soc_lambda is set to 0. Save the density. The next calculatin we start with the density from the first calculation and set the SOC prefactor to 1. With the information from the two calcualtions, we can get the band energy with magnetic moments in the direction, specified in two list, thetas, and phis.""" + parser = AbacusSplitSOCParser( + outpath_nosoc=path_nosoc, outpath_soc=path_soc, binary=False + ) + model = parser.parse() + if nel is not None: + model.nel = nel + ham = MAEGreen(model, kmesh, gamma=gamma, width=width) + es = ham.get_band_energy_vs_angles(thetas, phis) + if outfile: + with open(outfile, "w") as f: + f.write("#theta, phi, energy\n") + for theta, phi, e in zip(thetas, phis, es): + f.write(f"{theta:5.3f}, {phi:5.3f}, {e:10.9f}\n") + return es + + +def siesta_get_MAE( + fdf_fname, kmesh, thetas, phis, gamma=True, outfile="MAE.txt", nel=None, width=0.1 +): + """ """ + model = SislParser(fdf_fname=fdf_fname, read_H_soc=True).get_model() + if nel is not None: + model.nel = nel + ham = MAEGreen(model, kmesh, gamma=gamma, width=width) + # es = ham.get_band_energy_vs_angles(thetas, phis) + es = ham.get_band_energy_vs_angles(thetas, phis) + if outfile: + with open(outfile, "w") as f: + f.write("#theta, psi, energy\n") + for theta, psi, e in zip(thetas, phis, es): + # f.write(f"{theta}, {psi}, {e}\n") + f.write(f"{theta:5.3f}, {psi:5.3f}, {e:10.9f}\n") + return es + + +def test_AbacusSplitSOCWrapper(): + # path = Path("~/projects/2D_Fe").expanduser() + path = Path("~/projects/TB2Jflows/examples/2D_Fe/Fe_z").expanduser() + outpath_nosoc = f"{path}/soc0/OUT.ABACUS" + outpath_soc = f"{path}/soc1/OUT.ABACUS" + parser = AbacusSplitSOCParser( + outpath_nosoc=outpath_nosoc, outpath_soc=outpath_soc, binary=False + ) + model = parser.parse() + kmesh = [6, 6, 1] + + r = MAE(model, kmesh, gamma=True) + # thetas, es = r.get_band_energy_vs_theta(angle_range=(0, np.pi*2), rotation_axis="z", initial_direction=(1,0,0), npoints=21) + thetas, es, es2 = r.get_band_energy_vs_theta( + angle_range=(0, np.pi), + rotation_axis="y", + initial_direction=(0, 0, 1), + npoints=11, + ) + # print the table of thetas and es, es2 + for theta, e, e2 in zip(thetas, es, es2): + print(f"{theta=}, {e=}, {e2=}") + + plt.plot(thetas / np.pi, es - es[0], marker="o") + plt.plot(thetas / np.pi, es2 - es2[0], marker=".") + plt.savefig("E_along_z_x_z.png") + plt.show() + + +def abacus_get_MAE_cli(): + import argparse + + parser = argparse.ArgumentParser( + description="Get MAE from Abacus with magnetic force theorem. Two calculations are needed. First we do an calculation with SOC but the soc_lambda is set to 0. Save the density. The next calculatin we start with the density from the first calculation and set the SOC prefactor to 1. With the information from the two calcualtions, we can get the band energy with magnetic moments in the direction, specified in two list, thetas, and phis. 
" + ) + parser.add_argument("path_nosoc", type=str, help="Path to the calculation with ") + parser.add_argument("path_soc", type=str, help="Path to the SOC calculation") + parser.add_argument("thetas", type=float, nargs="+", help="Thetas") + parser.add_argument("psis", type=float, nargs="+", help="Phis") + parser.add_argument("kmesh", type=int, nargs=3, help="K-mesh") + parser.add_argument( + "--gamma", action="store_true", help="Use Gamma centered kpoints" + ) + parser.add_argument( + "--outfile", + type=str, + help="The angles and the energey will be saved in this file.", + ) + args = parser.parse_args() + abacus_get_MAE( + args.path_nosoc, + args.path_soc, + args.kmesh, + args.thetas, + args.psis, + gamma=args.gamma, + outfile=args.outfile, + ) + + +if __name__ == "__main__": + abacus_get_MAE_cli() diff --git a/TB2J/MAEGreen.py b/TB2J/MAEGreen.py new file mode 100644 index 0000000..1e5ec0a --- /dev/null +++ b/TB2J/MAEGreen.py @@ -0,0 +1,371 @@ +import gc +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import tqdm +from HamiltonIO.abacus.abacus_wrapper import AbacusSplitSOCParser +from HamiltonIO.model.occupations import GaussOccupations +from typing_extensions import DefaultDict + +from TB2J.anisotropy import Anisotropy +from TB2J.exchange import ExchangeNCL +from TB2J.external import p_map +from TB2J.mathutils.fibonacci_sphere import fibonacci_semisphere + +# from HamiltonIO.model.rotate_spin import rotate_Matrix_from_z_to_axis, rotate_Matrix_from_z_to_sperical +# from TB2J.abacus.abacus_wrapper import AbacusWrapper, AbacusParser +from TB2J.mathutils.rotate_spin import ( + rotate_spinor_matrix, +) + + +def get_occupation(evals, kweights, nel, width=0.1): + occ = GaussOccupations(nel=nel, width=width, wk=kweights, nspin=2) + return occ.occupy(evals) + + +class MAEGreen(ExchangeNCL): + def __init__(self, angles=None, **kwargs): + """ + angles are defined as theta, phi pairs, where theta is the angle between the z-axis and the magnetization direction, and phi is the angle between the x-axis and the projection of the magnetization direction on the x-y plane. + """ + super().__init__(**kwargs) + self.natoms = len(self.atoms) + if angles is None or angles == "axis": + self.set_angles_axis() + elif angles == "scan": + self.set_angles_scan() + elif angles == "fib": + self.set_angles_fib() + elif angles == "random": + self.set_angles_random() + elif angles == "miller": + self.set_angles_miller() + else: + self.thetas = angles[0] + self.phis = angles[1] + + nangles = len(self.thetas) + self.es = np.zeros(nangles, dtype=complex) + self.es_atom = np.zeros((nangles, self.natoms), dtype=complex) + self.es_atom_orb = DefaultDict(lambda: 0) + + def set_angles_axis(self): + """theta and phi are defined as the x, y, z, xy, yz, xz, xyz, x-yz, -xyz, -x-yz axis.""" + self.thetas = [0, np.pi / 2, np.pi / 2, np.pi / 2, np.pi, 0, np.pi / 2, 0, 0, 0] + self.phis = [0, 0, np.pi / 2, np.pi / 4, 0, 0, 0, np.pi] + + def set_angles_miller(self, nmax=2): + """theta and angles corresponding to the miller index. remove duplicates. + e.g. 002 and 001 are the same. 
+ """ + thetas = [] + phis = [] + for k in range(0, nmax + 1): + for j in range(-nmax, nmax + 1): + for i in range(-nmax, nmax + 1): + if i == 0 and j == 0 and k == 0: + continue + thetas.append(np.arccos(k / np.sqrt(i**2 + j**2 + k**2))) + if i == 0 and j == 0: + phis.append(0) + else: + phis.append(np.arctan2(j, i)) + self.thetas = thetas + self.phis = phis + self.angle_pairs = list(zip(thetas, phis)) + # remove duplicates of angles using sets. + self.angle_pairs = list(set(self.angle_pairs)) + self.thetas, self.phis = zip(*self.angle_pairs) + + def set_angles_scan(self, step=15): + self.thetas = [] + self.phis = [] + for i in range(0, 181, step): + for j in range(0, 181, step): + self.thetas.append(i * np.pi / 180) + self.phis.append(j * np.pi / 180) + + def set_angles_random(self, n=16): + # n random pairs of theta, phi + self.thetas = np.random.random(n) * np.pi + self.phis = np.random.random(n) * 2 * np.pi + + def set_angles_fib(self, n=35): + self.thetas, self.phis = fibonacci_semisphere(n) + # thetas and phis are np.array + # add (theta, phi): (pi/2, pi/2) and (pi/2, pi/4) + # self.thetas += [np.pi / 2, np.pi / 2] + # self.phis += [np.pi / 2, np.pi / 4] + for i in range(8): + self.thetas = np.concatenate([self.thetas, [np.pi / 2]]) + self.phis = np.concatenate([self.phis, [np.pi * i / 8]]) + + def get_band_energy_vs_angles_from_eigen( + self, + thetas, + phis, + ): + """ + Calculate the band energy for a given set of angles by using the eigenvalues and eigenvectors of the Hamiltonian. + """ + es = [] + nangles = len(thetas) + self.tbmodel.set_so_strength(1.0) + for i in tqdm.trange(nangles): + theta = thetas[i] + phi = phis[i] + self.tbmodel.set_Hsoc_rotation_angle([theta, phi]) + e = self.get_band_energy() + es.append(e) + return es + + def get_band_energy(self): + self.width = 0.1 + evals, _evecs = self.tbmodel.solve_all(self.G.kpts) + occ = get_occupation(evals, self.G.kweights, self.tbmodel.nel, width=self.width) + eband = np.sum(evals * occ * self.G.kweights[:, np.newaxis]) + return eband + + def get_perturbed(self, e, thetas, phis): + self.tbmodel.set_so_strength(0.0) + # maxsoc = self.tbmodel.get_max_Hsoc_abs() + # maxH0 = self.tbmodel.get_max_H0_spin_abs() + # if maxsoc > maxH0 * 0.01: + # print(f"""Warning: The SOC of the Hamiltonian has a maximum of {maxsoc} eV, + # comparing to the maximum of {maxH0} eV of the spin part of the Hamiltonian. + # The SOC is too strong, the perturbation theory may not be valid.""") + + G0K = self.G.get_Gk_all(e) + Hsoc_k = self.tbmodel.get_Hk_soc(self.G.kpts) + na = len(thetas) + dE_angle = np.zeros(na, dtype=complex) + dE_angle_atom = np.zeros((na, self.natoms), dtype=complex) + # dE_angle_orbitals = np.zeros((na, self.natoms, self.norb, self.norb), dtype=complex) + # dE_angle_orbitals = DefaultDict(lambda: 0) + dE_angle_atom_orb = DefaultDict(lambda: 0) + for iangle, (theta, phi) in enumerate(zip(thetas, phis)): + for ik, dHk in enumerate(Hsoc_k): + dHi = rotate_spinor_matrix(dHk, theta, phi) + GdH = G0K[ik] @ dHi + # dE += np.trace(GdH @ G0K[i].T.conj() @ dHi) * self.kweights[i] + # diagonal of second order perturbation. 
+ # dG2diag = np.diag(GdH @ GdH) + # dG2 = np.einsum("ij,ji->ij", GdH, GdH) + dG2 = GdH * GdH.T + dG2sum = np.sum(dG2) + # print(f"dG2sum-sum: {dG2sum}") + # dG2sum = np.sum(dG2diag) + + # dG2sum = np.trace(GdH @ GdH) + # print(f"dG2sum-Tr: {dG2sum}") + # dG1sum = np.trace(GdH) + # print(f"dG1sum-Tr: {dG1sum}") + + # dG2diag = np.diag(GdH @G0K[i].T.conj() @ dHi) + # dE_angle[iangle] += np.trace(GdH@GdH) * self.G.kweights[ik] + # dE_angle[iangle] += np.trace(GdH@G0K[ik].T.conj()@dHi ) * self.G.kweights[ik] + dE_angle[iangle] += dG2sum * self.G.kweights[ik] + for iatom in range(self.natoms): + iorb = self.iorb(iatom) + # dG2= dG2[::2, ::2] + dG2[1::2, 1::2] + dG2[1::2, ::2] + dG2[::2, 1::2] + dE_atom_orb = dG2[np.ix_(iorb, iorb)] * self.G.kweights[ik] + dE_atom_orb = ( + dE_atom_orb[::2, ::2] + + dE_atom_orb[1::2, 1::2] + + dE_atom_orb[1::2, ::2] + + dE_atom_orb[::2, 1::2] + ) + dE_atom = np.sum(dE_atom_orb) + mmat = self.mmats[iatom] + dE_atom_orb = mmat.T @ dE_atom_orb @ mmat + dE_angle_atom_orb[(iangle, iatom)] += dE_atom_orb + # dE_atom = np.sum(dG2diag[iorb]) * self.G.kweights[ik] + dE_angle_atom[iangle, iatom] += dE_atom + return dE_angle, dE_angle_atom, dE_angle_atom_orb + + def get_perturbed_R(self, e, thetas, phis): + self.tbmodel.set_so_strength(0.0) + # Here only the first R vector is considered. + # TODO: consider all R vectors. + # Rlist = np.zeros((1, 3), dtype=float) + # G0K = self.G.get_Gk_all(e) + # G0R = k_to_R(self.G.kpts, Rlist, G0K, self.G.kweights) + # dE_angle = np.zeros(len(thetas), dtype=complex) + # dE_angle_atoms = np.zeros((len(thetas), self.natoms), dtype=complex) + pass + + def get_band_energy_vs_angles(self, thetas, phis, with_eigen=False): + if with_eigen: + self.es2 = self.get_band_energy_vs_angles_from_eigen(thetas, phis) + + # for ie, e in enumerate(tqdm.tqdm(self.contour.path)): + # dE_angle, dE_angle_atom, dE_angle_atom_orb = self.get_perturbed( + # e, thetas, phis + # ) + # self.es += dE_angle * self.contour.weights[ie] + # self.es_atom += dE_angle_atom * self.contour.weights[ie] + # for key, value in dE_angle_atom_orb.items(): + # self.es_atom_orb[key] += ( + # dE_angle_atom_orb[key] * self.contour.weights[ie] + # ) + + # rewrite the for loop above to use p_map + def func(e): + return self.get_perturbed(e, thetas, phis) + + if self.nproc > 1: + results = p_map(func, self.contour.path, num_cpus=self.nproc) + else: + npole = len(self.contour.path) + results = map(func, tqdm.tqdm(self.contour.path, total=npole)) + for i, result in enumerate(results): + dE_angle, dE_angle_atom, dE_angle_atom_orb = result + self.es += dE_angle * self.contour.weights[i] + self.es_atom += dE_angle_atom * self.contour.weights[i] + for key, value in dE_angle_atom_orb.items(): + self.es_atom_orb[key] += ( + dE_angle_atom_orb[key] * self.contour.weights[i] + ) + + self.es = -np.imag(self.es) / (2 * np.pi) + self.es_atom = -np.imag(self.es_atom) / (2 * np.pi) + for key, value in self.es_atom_orb.items(): + self.es_atom_orb[key] = -np.imag(value) / (2 * np.pi) + + def fit_anisotropy_tensor(self): + self.ani = Anisotropy.fit_from_data(self.thetas, self.phis, self.es) + return self.ani + + def output( + self, + output_path="TB2J_anisotropy", + with_eigen=True, + figure3d="MAE_3d.png", + figure_contourf="MAE_contourf.png", + ): + Path(output_path).mkdir(exist_ok=True) + fname = f"{output_path}/MAE.dat" + fname_orb = f"{output_path}/MAE_orb.dat" + fname_tensor = f"{output_path}/MAE_tensor.dat" + if figure3d is not None: + fname_fig3d = f"{output_path}/{figure3d}" + if figure_contourf is 
not None: + fname_figcontourf = f"{output_path}/{figure_contourf}" + + # ouput with eigenvalues. + if with_eigen: + fname_eigen = f"{output_path}/MAE_eigen.dat" + with open(fname_eigen, "w") as f: + f.write("# theta, phi, MAE(total), MAE(atom-wise) Unit: meV\n") + for i, (theta, phi, e, es) in enumerate( + zip(self.thetas, self.phis, self.es2, self.es_atom) + ): + f.write(f"{theta:.5f} {phi:.5f} {e*1e3:.8f} ") + for ea in es: + f.write(f"{ea*1e3:.8f} ") + f.write("\n") + + with open(fname, "w") as f: + f.write("# theta (rad), phi(rad), MAE(total), MAE(atom-wise) Unit: meV\n") + for i, (theta, phi, e, es) in enumerate( + zip(self.thetas, self.phis, self.es, self.es_atom) + ): + f.write(f"{theta%np.pi:.5f} {phi%(2*np.pi):.5f} {e*1e3:.8f} ") + for ea in es: + f.write(f"{ea*1e3:.8f} ") + f.write("\n") + + self.ani = self.fit_anisotropy_tensor() + with open(fname_tensor, "w") as f: + f.write("# Anisotropy tensor in meV\n") + f.write(f"{self.ani.tensor_strings(include_isotropic=False)}\n") + + if figure3d is not None: + self.ani.plot_3d(figname=fname_fig3d, show=False) + + if figure_contourf is not None: + self.ani.plot_contourf(figname=fname_figcontourf, show=False) + + plt.close() + gc.collect() + + with open(fname_orb, "w") as f: + f.write("=" * 80 + "\n") + f.write("Orbitals for each atom: \n") + f.write("=" * 80 + "\n") + f.write("Note: the energies are in meV\n") + for iatom in range(self.natoms): + f.write(f"Atom {iatom:03d}: ") + for orb in self.orbital_names[iatom]: + f.write(f"{orb} ") + f.write("\n") + for i, (theta, phi, e, eatom) in enumerate( + zip(self.thetas, self.phis, self.es, self.es_atom) + ): + f.write("-" * 60 + "\n") + f.write(f"Angle {i:03d}: theta={theta:.5f} phi={phi:.5f} \n ") + f.write(f"E: {e*1e3:.8f} \n") + for iatom, ea in enumerate(eatom): + f.write(f"Atom {iatom:03d}: {ea*1e3:.8f} \n") + f.write("Orbital: ") + eorb = self.es_atom_orb[(i, iatom)] + + # write numpy matrix to file + f.write( + np.array2string( + eorb * 1e3, precision=4, separator=",", suppress_small=True + ) + ) + + eorb_diff = eorb - self.es_atom_orb[(0, iatom)] + f.write("Diference to the first angle: ") + f.write( + np.array2string( + eorb_diff * 1e3, + precision=4, + separator=",", + suppress_small=True, + ) + ) + f.write("\n") + + def run(self, output_path="TB2J_anisotropy", with_eigen=False): + self.get_band_energy_vs_angles(self.thetas, self.phis, with_eigen=with_eigen) + self.output(output_path=output_path, with_eigen=with_eigen) + + +def abacus_get_MAE( + path_nosoc, + path_soc, + kmesh, + thetas, + phis, + gamma=True, + output_path="TB2J_anisotropy", + magnetic_elements=None, + nel=None, + width=0.1, + with_eigen=False, + **kwargs, +): + """Get MAE from Abacus with magnetic force theorem. Two calculations are needed. First we do an calculation with SOC but the soc_lambda is set to 0. Save the density. The next calculatin we start with the density from the first calculation and set the SOC prefactor to 1. 
With the information from the two calcualtions, we can get the band energy with magnetic moments in the direction, specified in two list, thetas, and phis.""" + parser = AbacusSplitSOCParser( + outpath_nosoc=path_nosoc, outpath_soc=path_soc, binary=False + ) + model = parser.parse() + model.set_so_strength(0.0) + if nel is not None: + model.nel = nel + mae = MAEGreen( + tbmodels=model, + atoms=model.atoms, + kmesh=kmesh, + efermi=None, + basis=model.basis, + angles=[thetas, phis], + magnetic_elements=magnetic_elements, + **kwargs, + ) + mae.run(output_path=output_path, with_eigen=with_eigen) diff --git a/TB2J/abacus/MAE.py b/TB2J/abacus/MAE.py deleted file mode 100644 index 28506ef..0000000 --- a/TB2J/abacus/MAE.py +++ /dev/null @@ -1,320 +0,0 @@ -import numpy as np -from TB2J.abacus.abacus_wrapper import AbacusWrapper, AbacusParser -from TB2J.mathutils.rotate_spin import rotate_Matrix_from_z_to_axis -from TB2J.kpoints import monkhorst_pack -from TB2J.mathutils.fermi import fermi -from TB2J.mathutils.kR_convert import R_to_k -from scipy.linalg import eigh -from copy import deepcopy -from scipy.spatial.transform import Rotation -import matplotlib.pyplot as plt -from pathlib import Path -from TB2J.abacus.occupations import Occupations - -# TODO List: -# - [x] Add the class AbacusSplitSOCWrapper -# - [x] Add the function to rotate the XC part -# - [x] Compute the band energy at arbitrary angle - - -def get_occupation(evals, kweights, nel, width=0.1): - occ = Occupations(nel=nel, width=width, wk=kweights, nspin=2) - return occ.occupy(evals) - - -def get_density_matrix(evals=None, evecs=None, kweights=None, nel=None, width=0.1): - occ = get_occupation(evals, kweights, nel, width=width) - rho = np.einsum("kib, kb, kjb -> kij", evecs, occ, evecs.conj()) - return rho - - -def spherical_to_cartesian(theta, phi, normalize=True): - """ - Convert spherical coordinates to cartesian - """ - x = np.sin(theta) * np.cos(phi) - y = np.sin(theta) * np.sin(phi) - z = np.cos(theta) - vec = np.array([x, y, z]) - if normalize: - vec = vec / np.linalg.norm(vec) - return vec - - -class AbacusSplitSOCWrapper(AbacusWrapper): - """ - Abacus wrapper with Hamiltonian split to SOC and non-SOC parts - """ - - def __init__(self, *args, **kwargs): - HR_soc = kwargs.pop("HR_soc", None) - # nbasis = HR_soc.shape[1] - # kwargs["nbasis"] = nbasis - super().__init__(*args, **kwargs) - self._HR_copy = deepcopy(self._HR) - self.HR_soc = HR_soc - self.soc_lambda = 1.0 - self.nel = 16 - self.width = 0.1 - - @property - def HR(self): - return self._HR + self.HR_soc * self.soc_lambda - - def rotate_HR_xc(self, axis): - """ - Rotate SOC part of Hamiltonian - """ - for iR, R in enumerate(self.Rlist): - self._HR[iR] = rotate_Matrix_from_z_to_axis(self._HR_copy[iR], axis) - - def rotate_Hk_xc(self, axis): - """ - Rotate SOC part of Hamiltonian - """ - for ik in range(len(self._Hk)): - self._Hk[ik] = rotate_Matrix_from_z_to_axis(self._Hk_copy[ik], axis) - - def get_density_matrix(self, kpts, kweights=None): - rho = np.zeros((len(kpts), self.nbasis, self.nbasis), dtype=complex) - evals, evecs = self.solve_all(kpts) - occ = get_occupation(evals, kweights, self.nel, width=self.width) - rho = np.einsum( - "kib, kb, kjb -> kij", evecs, occ, evecs.conj() - ) # should multiply S to the the real DM. 
- return rho - - def rotate_DM(self, rho, axis): - """ - Rotate the density matrix - """ - for ik in range(len(rho)): - rho[ik] = rotate_Matrix_from_z_to_axis(rho[ik], axis) - return rho - - -class RotateHam: - def __init__(self, model, kmesh, gamma=True): - self.model = model - self.kpts = monkhorst_pack(kmesh, gamma_center=gamma) - self.kweights = np.ones(len(self.kpts), dtype=float) / len(self.kpts) - - def get_band_energy2(self): - for ik, kpt in enumerate(self.kpts): - Hk, Sk = self.model.gen_ham(kpt) - evals, evecs = eigh(Hk, Sk) - rho = np.einsum( - "ib, b, jb -> ij", - evecs, - fermi(evals, self.model.efermi, width=0.05), - evecs.conj(), - ) - eband1 = np.sum(evals * fermi(evals, self.model.efermi, width=0.05)) - eband2 = np.trace(Hk @ rho) - print(eband1, eband2) - - def get_band_energy(self, dm=False): - evals, evecs = self.model.solve_all(self.kpts) - occ = get_occupation( - evals, self.kweights, self.model.nel, width=self.model.width - ) - eband = np.sum(evals * occ * self.kweights[:, np.newaxis]) - # * fermi(evals, self.model.efermi, width=0.05) - if dm: - density_matrix = self.model.get_density_matrix(evecs) - return eband, density_matrix - else: - return eband - - def calc_ref(self): - # calculate the Hk_ref, Sk_ref, Hk_soc_ref, and rho_ref - self.Sk_ref = R_to_k(self.kpts, self.model.Rlist, self.model.SR) - self.Hk_xc_ref = R_to_k(self.kpts, self.model.Rlist, self.model._HR_copy) - self.Hk_soc_ref = R_to_k(self.kpts, self.model.Rlist, self.model.HR_soc) - self.rho_ref = np.zeros( - (len(self.kpts), self.model.nbasis, self.model.nbasis), dtype=complex - ) - - evals = np.zeros((len(self.kpts), self.model.nbasis), dtype=float) - evecs = np.zeros( - (len(self.kpts), self.model.nbasis, self.model.nbasis), dtype=complex - ) - - for ik, kpt in enumerate(self.kpts): - # evals, evecs = eigh(self.Hk_xc_ref[ik]+self.Hk_soc_ref[ik], self.Sk_ref[ik]) - evals[ik], evecs[ik] = eigh(self.Hk_xc_ref[ik], self.Sk_ref[ik]) - occ = get_occupation( - evals, self.kweights, self.model.nel, width=self.model.width - ) - # occ = fermi(evals, self.model.efermi, width=self.model.width) - self.rho_ref = np.einsum("kib, kb, kjb -> kij", evecs, occ, evecs.conj()) - - def get_band_energy_from_rho(self, axis): - """ - This is wrong!! Should use second order perturbation theory to get the band energy instead. 
- """ - eband = 0.0 - for ik, k in enumerate(self.kpts): - rho = rotate_Matrix_from_z_to_axis(self.rho_ref[ik], axis) - Hk_xc = rotate_Matrix_from_z_to_axis(self.Hk_xc_ref[ik], axis) - Hk_soc = self.Hk_soc_ref[ik] - Htot = Hk_xc + Hk_soc * self.model.soc_lambda - # Sk = self.Sk_ref[ik] - # evals, evecs = eigh(Htot, Sk) - # rho2= np.einsum("ib, b, jb -> ij", evecs, fermi(evals, self.model.efermi, width=0.05), evecs.conj()) - if ik == 0 and False: - pass - # print(f"{evecs[:4,0:4].real=}") - # print(f"{evals[:4]=}") - # print(f"{Hk_xc[:4,0:4].real=}") - # print(f"{Htot[:4,0:4].real=}") - # print(f"{Sk[:4,0:4].real=}") - # print(f"{rho[:4,0:4].real=}") - # print(f"{rho2[:4,0:4].real=}") - # eband1 = np.sum(evals * fermi(evals, self.model.efermi, width=0.05)) - # eband2 = np.trace(Htot @ rho2).real - # eband3 = np.trace(Htot @ rho).real - # print(eband1, eband2, eband3) - e_soc = np.trace(Hk_soc @ rho) * self.kweights[ik] * self.model.soc_lambda - eband += e_soc - return eband - - def get_band_energy_vs_angles( - self, - thetas, - psis, - ): - es = [] - # es2 = [] - # e,rho = self.model.get_band_energy(dm=True) - # self.calc_ref() - # thetas = np.linspace(*angle_range, npoints) - for i, theta, phi in enumerate(zip(thetas, psis)): - axis = spherical_to_cartesian(theta, phi) - self.model.rotate_HR_xc(axis) - # self.get_band_energy2() - e = self.get_band_energy() - es.append(e) - # es2.append(e2) - return es - - -def get_model_energy(model, kmesh, gamma=True): - ham = RotateHam(model, kmesh, gamma=gamma) - return ham.get_band_energy() - - -class AbacusSplitSOCParser: - """ - Abacus parser with Hamiltonian split to SOC and non-SOC parts - """ - - def __init__(self, outpath_nosoc=None, outpath_soc=None, binary=False): - self.outpath_nosoc = outpath_nosoc - self.outpath_soc = outpath_soc - self.binary = binary - self.parser_nosoc = AbacusParser(outpath=outpath_nosoc, binary=binary) - self.parser_soc = AbacusParser(outpath=outpath_soc, binary=binary) - spin1 = self.parser_nosoc.read_spin() - spin2 = self.parser_soc.read_spin() - if spin1 != "noncollinear" or spin2 != "noncollinear": - raise ValueError("Spin should be noncollinear") - - def parse(self): - nbasis, Rlist, HR, SR = self.parser_nosoc.Read_HSR_noncollinear() - nbasis2, Rlist2, HR2, SR2 = self.parser_soc.Read_HSR_noncollinear() - # print(HR[0]) - HR_soc = HR2 - HR - model = AbacusSplitSOCWrapper(HR, SR, Rlist, nbasis, nspin=2, HR_soc=HR_soc) - model.efermi = self.parser_soc.efermi - model.basis = self.parser_nosoc.basis - model.atoms = self.parser_nosoc.atoms - return model - - -def abacus_get_MAE( - path_nosoc, path_soc, kmesh, thetas, psis, gamma=True, outfile="MAE.txt" -): - """Get MAE from Abacus with magnetic force theorem. Two calculations are needed. First we do an calculation with SOC but the soc_lambda is set to 0. Save the density. The next calculatin we start with the density from the first calculation and set the SOC prefactor to 1. 
With the information from the two calcualtions, we can get the band energy with magnetic moments in the direction, specified in two list, thetas, and phis.""" - parser = AbacusSplitSOCParser( - outpath_nosoc=path_nosoc, outpath_soc=path_soc, binary=False - ) - model = parser.parse() - ham = RotateHam(model, kmesh, gamma=gamma) - es = [] - for theta, psi in zip(thetas, psis): - axis = spherical_to_cartesian(theta, psi) - model.rotate_HR_xc(axis) - e = ham.get_band_energy() - es.append(ham.get_band_energy()) - if outfile: - with open(outfile, "w") as f: - f.write("theta, psi, energy\n") - for theta, psi, e in zip(thetas, psis, es): - f.write(f"{theta}, {psi}, {e}\n") - return es - - -def test_AbacusSplitSOCWrapper(): - # path = Path("~/projects/2D_Fe").expanduser() - path = Path("~/projects/TB2Jflows/examples/2D_Fe/Fe_z").expanduser() - outpath_nosoc = f"{path}/soc0/OUT.ABACUS" - outpath_soc = f"{path}/soc1/OUT.ABACUS" - parser = AbacusSplitSOCParser( - outpath_nosoc=outpath_nosoc, outpath_soc=outpath_soc, binary=False - ) - model = parser.parse() - kmesh = [6, 6, 1] - - r = RotateHam(model, kmesh) - # thetas, es = r.get_band_energy_vs_theta(angle_range=(0, np.pi*2), rotation_axis="z", initial_direction=(1,0,0), npoints=21) - thetas, es, es2 = r.get_band_energy_vs_theta( - angle_range=(0, np.pi * 2), - rotation_axis="y", - initial_direction=(0, 0, 1), - npoints=11, - ) - # print the table of thetas and es, es2 - print("theta, e, e2") - for theta, e, e2 in zip(thetas, es, es2): - print(f"{theta=}, {e=}, {e2=}") - - plt.plot(thetas / np.pi, es - es[0], marker="o") - plt.plot(thetas / np.pi, es2 - es2[0], marker=".") - plt.savefig("E_along_z_x_z.png") - plt.show() - - -def abacus_get_MAE_cli(): - import argparse - - parser = argparse.ArgumentParser( - description="Get MAE from Abacus with magnetic force theorem. Two calculations are needed. First we do an calculation with SOC but the soc_lambda is set to 0. Save the density. The next calculatin we start with the density from the first calculation and set the SOC prefactor to 1. With the information from the two calcualtions, we can get the band energy with magnetic moments in the direction, specified in two list, thetas, and phis. " - ) - parser.add_argument("path_nosoc", type=str, help="Path to the calculation with ") - parser.add_argument("path_soc", type=str, help="Path to the SOC calculation") - parser.add_argument("thetas", type=float, nargs="+", help="Thetas") - parser.add_argument("psis", type=float, nargs="+", help="Phis") - parser.add_argument("kmesh", type=int, nargs=3, help="K-mesh") - parser.add_argument( - "--gamma", action="store_true", help="Use Gamma centered kpoints" - ) - parser.add_argument( - "--outfile", - type=str, - help="The angles and the energey will be saved in this file.", - ) - args = parser.parse_args() - abacus_get_MAE( - args.path_nosoc, - args.path_soc, - args.kmesh, - args.thetas, - args.psis, - gamma=args.gamma, - outfile=args.outfile, - ) - - -if __name__ == "__main__": - abacus_get_MAE_cli() diff --git a/TB2J/abacus/__init__.py b/TB2J/abacus/__init__.py deleted file mode 100644 index 88ae552..0000000 --- a/TB2J/abacus/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .abacus_wrapper import AbacusWrapper, AbacusParser diff --git a/TB2J/abacus/occupations.py b/TB2J/abacus/occupations.py deleted file mode 100644 index 6509bed..0000000 --- a/TB2J/abacus/occupations.py +++ /dev/null @@ -1,278 +0,0 @@ -""" -This file is stolen from the hotbit programm, with some modification. 
-""" - -import numpy as np -from scipy.optimize import brentq -import sys - -from ase.dft.dos import DOS -from scipy import integrate - -# import numba - -# from numba import float64, int32 - -MAX_EXP_ARGUMENT = np.log(sys.float_info.max) - -# @numba.vectorize(nopython=True) -# def myfermi(e, mu, width, nspin): -# x = (e - mu) / width -# if x < -10: -# ret = 2.0 / nspin -# elif x > 10: -# ret = 0.0 -# else: -# ret = 2.0 / nspin / (math.exp(x) + 1) -# return ret - - -def myfermi(e, mu, width, nspin): - x = (e - mu) / width - return np.where(x < 10, 2.0 / (nspin * (np.exp(x) + 1.0)), 0.0) - - -class Occupations(object): - def __init__(self, nel, width, wk, nspin=1): - """ - Initialize parameters for occupations. - :param nel: Number of electrons - :param width: Fermi-broadening - :param wk: k-point weights. eg. If only gamma, [1.0] - :param nspin(optional): number of spin, if spin=1 multiplicity=2 else, multiplicity=1. - """ - self.nel = nel - self.width = width - self.wk = wk - self.nk = len(wk) - self.nspin = nspin - - def get_mu(self): - """Return the Fermi-level (or chemical potential).""" - return self.mu - - def fermi(self, mu): - """ - Occupy states with given chemical potential. - Occupations are 0...2; without k-point weights - """ - return myfermi(self.e, mu, self.width, self.nspin) - - def root_function(self, mu): - """This function is exactly zero when mu is right.""" - f = self.fermi(mu) - return np.einsum("i, ij->", self.wk, f) - self.nel - - def occupy(self, e, xtol=1e-11): - """ - Calculate occupation numbers with given Fermi-broadening. - - @param e: e[ind_k,ind_orb] energy of k-point, state a - Note added by hexu: With spin=2,e[k,a,sigma], it also work. only the *2 should be removed. - @param wk: wk[:] weights for k-points - @param width: The Fermi-broadening - - Returns: fermi[ind_k, ind_orb] - """ - self.e = e - eflat = e.flatten() - ind = np.argsort(eflat) - e_sorted = eflat[ind] - if self.nspin == 1: - m = 2 - elif self.nspin == 2: - m = 1 - n_sorted = (self.wk[:, None, None] * np.ones_like(e) * m).flatten()[ind] - - sum = n_sorted.cumsum() - if self.nel < sum[0]: - ifermi = 0 - elif self.nel > sum[-1]: - raise ("number of electrons larger than number of orbital*spin") - else: - ifermi = np.searchsorted(sum, self.nel) - try: - if ifermi == 0: - elo = e_sorted[0] - else: - elo = e_sorted[ifermi - 1] - if ifermi == len(e_sorted) - 1: - ehi = e_sorted[-1] - else: - ehi = e_sorted[ifermi + 1] - guess = e_sorted[ifermi] - dmu = np.max((self.width, guess - elo, ehi - guess)) - mu = brentq(self.root_function, guess - dmu, guess + dmu, xtol=xtol) - # mu = brent( - # self.root_function, - # brack=(guess - elo, guess, guess + dmu), - # tol=xtol) - except Exception as E: - # probably a bad guess - print("Error in finding Fermi level: ", E) - dmu = self.width - if self.nel < 1e-3: - mu = min(e_sorted) - dmu * 20 - elif self.nel - sum[-1] > -1e-3: - mu = max(e_sorted) + dmu * 20 - else: - # mu = brent( - # self.root_function, - # brack=(e_sorted[0] - dmu * 10, - # guess, - # e_sorted[-1] + dmu * 10), - # tol=xtol) - mu = brentq( - self.root_function, - e_sorted[0] - dmu * 20, - e_sorted[-1] + dmu * 20, - xtol=xtol, - ) - - if np.abs(self.root_function(mu)) > xtol * 1e4: - # raise RuntimeError( - # 'Fermi level could not be assigned reliably. Has the system fragmented?' - # ) - print( - "Fermi level could not be assigned reliably. Has the system fragmented?" 
- ) - - f = self.fermi(mu) - # rho=(self.eigenvecs*f).dot(self.eigenvecs.transpose()) - - self.mu, self.f = mu, f - return f - - def plot(self): - import pylab as pl - - for ik in range(self.nk): - pl.plot(self.e[ik, :], self.f[ik, :]) - pl.scatter(self.e[ik, :], self.f[ik, :]) - pl.title("occupations") - pl.xlabel("energy (Ha)") - pl.ylabel("occupation") - pl.show() - - -class GaussOccupations(Occupations): - def get_mu(self): - return self.mu - - def delta(self, energy): - """Return a delta-function centered at 'energy'.""" - x = -(((self.e - energy) / self.width) ** 2) - return np.exp(x) / (np.sqrt(np.pi) * self.width) - - def get_dos(self, npts=500): - eflat = self.e.flatten() - ind = np.argsort(eflat) - ##e_sorted = eflat[ind] - if self.nspin == 1: - m = 2 - elif self.nspin == 2: - m = 1 - # n_sorted = (self.wk * np.ones_like(self.e) * m).flatten()[ind] - dos = np.zeros(npts) - for w, e_n in zip(self.w_k, self.e_skn[0]): - for e in e_n: - dos += w * self.delta(e) - - def root_function(self, mu): - pass - - # @profile - def occupy(self, e, xtol=1e-8, guess=0.0): - self.e = e - dos = myDOS(kweights=self.wk, eigenvalues=e, width=self.width, npts=501) - edos = dos.get_energies() - d = dos.get_dos() - idos = integrate.cumtrapz(d, edos, initial=0) - self.nel - # f_idos = interpolate.interp1d(edos, idos) - # ret = optimize.fmin(f_idos, x0=edos[400], xtol=xtol, disp=True) - ifermi = np.searchsorted(idos, 0.0) - # self.mu = ret[0] - self.mu = edos[ifermi] - self.f = self.fermi(self.mu) - return self.f - - -class myDOS(DOS): - def __init__( - self, kweights, eigenvalues, nspin=1, width=0.1, window=None, npts=1001 - ): - """Electronic Density Of States object. - - calc: calculator object - Any ASE compliant calculator object. - width: float - Width of guassian smearing. Use width=0.0 for linear tetrahedron - interpolation. - window: tuple of two float - Use ``window=(emin, emax)``. If not specified, a window - big enough to hold all the eigenvalues will be used. - npts: int - Number of points. - - """ - self.npts = npts - self.width = width - # self.w_k = calc.get_k_point_weights() - self.w_k = kweights - self.nspins = nspin - # self.e_skn = np.array([[calc.get_eigenvalues(kpt=k, spin=s) - # for k in range(len(self.w_k))] - # for s in range(self.nspins)]) - # self.e_skn -= calc.get_fermi_level() - self.e_skn = np.array([eigenvalues.T]) # eigenvalues: iband, ikpt - - if window is None: - emin = None - emax = None - else: - emin, emax = window - - if emin is None: - emin = self.e_skn.min() - 10 * self.width - if emax is None: - emax = self.e_skn.max() + 10 * self.width - - self.energies = np.linspace(emin, emax, npts) - - # if width == 0.0: # To use tetrahedron method - # bzkpts = calc.get_bz_k_points() - # size, offset = get_monkhorst_pack_size_and_offset(bzkpts) - # bz2ibz = calc.get_bz_to_ibz_map() - # shape = (self.nspins,) + tuple(size) + (-1,) - # self.e_skn = self.e_skn[:, bz2ibz].reshape(shape) - # self.cell = calc.atoms.cell - - def get_idos(self): - e, d = self.get_dos() - return np.trapz(d, e) - - def delta(self, energy): - """Return a delta-function centered at 'energy'.""" - x = -(((self.energies - energy) / self.width) ** 2) - return np.exp(x) / (np.sqrt(np.pi) * self.width) - - def get_dos(self, spin=None): - """Get array of DOS values. - - The *spin* argument can be 0 or 1 (spin up or down) - if not - specified, the total DOS is returned. 
- """ - - if spin is None: - if self.nspins == 2: - # Spin-polarized calculation, but no spin specified - - # return the total DOS: - return self.get_dos(spin=0) + self.get_dos(spin=1) - else: - spin = 0 - - dos = np.zeros(self.npts) - for w, e_n in zip(self.w_k, self.e_skn[spin]): - for e in e_n: - dos += w * self.delta(e) - return dos diff --git a/TB2J/anisotropy.py b/TB2J/anisotropy.py new file mode 100644 index 0000000..73fe930 --- /dev/null +++ b/TB2J/anisotropy.py @@ -0,0 +1,746 @@ +import random +from dataclasses import dataclass +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +from numpy.linalg import matrix_rank +from scipy.interpolate import LinearNDInterpolator +from scipy.optimize import curve_fit + + +@dataclass +class Anisotropy: + T: np.ndarray = None + direction: np.ndarray = None + amplitude: float = None + isotropic_part: float = None + + @classmethod + def from_T6(cls, T6): + T = T6_to_T(T6) + isotropic_part = np.trace(T) / 3 + T -= isotropic_part * np.eye(3) + direction, amplitude = anisotropy_tensor_to_vector(T) + return cls( + direction=direction, amplitude=amplitude, isotropic_part=isotropic_part, T=T + ) + + @classmethod + def from_direction_amplitude(cls, direction, amplitude, isotropic_part=0.0): + T = anisotropy_vector_to_tensor(direction, amplitude) + return cls( + T=T, direction=direction, amplitude=amplitude, isotropic_part=isotropic_part + ) + + @classmethod + def from_tensor(cls, T): + isotropic_part = np.trace(T) / 3 * np.eye(3) + T -= np.trace(T) / 3 * np.eye(3) + direction, amplitude = anisotropy_tensor_to_vector(T) + return cls( + T=T, direction=direction, amplitude=amplitude, isotropic_part=isotropic_part + ) + + def __post_init__(self): + if self.isotropic_part is None: + self.isotropic_part = 0 + if self.T is None: + self.T = ( + anisotropy_vector_to_tensor(self.direction, self.amplitude) + + self.isotropic_part + ) + elif self.direction is None or self.amplitude is None: + self.isotropic_part = np.trace(self.T) / 3 * np.eye(3) + # print(f'anisotropic tensor = {self.anisotropic_part}') + self.direction, self.amplitude = anisotropy_tensor_to_vector( + self.anisotropic_part + ) + self.isotropic_part = np.trace(self.T) / 3 + if self.T is None or self.direction is None or self.amplitude is None: + raise ValueError( + "The input does not have enough information to create the anisotropy object." + ) + + def is_rank_one(self): + """ + single-axis anisotropy should be rank-one. 
+ """ + print(f"rank = {matrix_rank(self.anisotropic_part)}") + print(f"anisotropic_part = {self.anisotropic_part}") + return matrix_rank(self.anisotropic_part) == 1 + + def tensor(self): + return self.T + + @property + def anisotropic_part(self): + return self.T - self.isotropic_part * np.eye(3) + + @property + def axis(self): + return self.direction + + @property + def axis_type(self): + if self.is_easy_axis(): + return "easy" + else: + return "hard" + + def axis_angle(self, unit="rad"): + theta = np.arccos(self.direction[2]) + phi = np.arctan2(self.direction[1], self.direction[0]) + if unit.startswith("deg"): + theta = theta * 180 / np.pi + phi = phi * 180 / np.pi + return theta, phi + + def amplitude(self): + return self.amplitude + + def is_easy_axis(self): + return self.amplitude > 0 + + def is_hard_axis(self): + return self.amplitude < 0 + + def energy_vector_form(self, S=None, angle=None, include_isotropic=False): + if S is None: + S = sphere_to_cartesian(angle) + print(f"S shape = {S.shape}") + # return anisotropy_energy_vector_form(S, *angles, self.amplitude, self.isotropic_part) + print(f"direction shape = {self.direction.shape}") + print(f"amplitude shape = {self.amplitude.shape}") + print(f"iso shape = {self.isotropic_part.shape}") + E = -self.amplitude * (S @ self.direction) ** 2 + if include_isotropic: + E = E + self.isotropic_part + return E + + def energy_tensor_form(self, S=None, angle=None, include_isotropic=False): + # return anisotropy_energy_tensor_form(self.T, S) + if S is None: + S = sphere_to_cartesian(angle) + if include_isotropic: + return -S.T @ self.T @ S + else: + return -S.T @ self.anisotropic_part @ S + + @classmethod + def fit_from_data(cls, thetas, phis, values, test=False, units="rad"): + """ + Fit the anisotropic tensor to the data + parameters: + thetas: the polar angle in degree + phis: the azimuthal angle in degree + values: the anisotropic value + Return: + the anisotropic object fitted from the data + """ + angles = np.vstack([thetas, phis]) + if units.lower().startswith("deg"): + angles = np.deg2rad(angles) + params, cov = curve_fit(anisotropy_energy, angles, values) + fitted_values = anisotropy_energy(angles, *params) + + delta = fitted_values - values + + # print(f'Max value = {np.max(values)}, Min value = {np.min(values)}') + if np.abs(delta).max() > 1e-4: + print("Warning: The fitting is not consistent with the data.") + print(f"Max-min = {np.max(values) - np.min(values)}") + print(f"delta = {np.max(np.abs(delta))}") + T = T6_to_T(params) + obj = cls(T=T) + + if test: + values2 = [] + for i in range(len(thetas)): + E = obj.energy_tensor_form( + angle=[thetas[i], phis[i]], include_isotropic=True + ) + values2.append(E) + # delta2 = np.array(values2) - values + # print(delta2) + + ax = plot_3D_scatter(angles, values - np.min(values), color="r") + plot_3D_scatter(angles, values2 - np.min(values), ax=ax, color="b") + plt.show() + + return obj + + @classmethod + def fit_from_data_vector_form(cls, thetas, phis, values): + """ + Fit the anisotropic tensor to the data + parameters: + thetas: the polar angle in degree + phis: the azimuthal angle in degree + values: the anisotropic value + Return: + the anisotropic object fitted from the data + """ + angles = np.vstack([thetas, phis]) + params, cov = curve_fit(anisotropy_energy_vector_form, angles, values) + fitted_values = anisotropy_energy_vector_form(angles, *params) + delta = fitted_values - values + print(f"Max value = {np.max(values)}, Min value = {np.min(values)}") + print(f"Max-min = 
{np.max(values) - np.min(values)}") + print(f"delta = {delta}") + theta_a, phi_a, amplitude, isotropic_part = params + direction = sphere_to_cartesian([theta_a, phi_a]) + return cls.from_direction_amplitude( + direction=direction, amplitude=amplitude, isotropic_part=isotropic_part + ) + + @classmethod + def fit_from_data_file(cls, fname, method="tensor"): + """ + Fit the anisotropic tensor to the data + parameters: + fname: the file name of the data + Return: + anisotropy: the anisotropic object + """ + data = np.loadtxt(fname) + theta, phi, value = data[:, 0], data[:, 1], data[:, 2] + if method == "tensor": + return cls.fit_from_data(theta, phi, value) + elif method == "vector": + return cls.fit_from_data_vector_form(theta, phi, value) + else: + raise ValueError(f"Unknown method {method}") + + @classmethod + def fit_from_xyz_data_file(cls, fname, method="tensor"): + """ + Fit the anisotropic tensor to the data with x y z val form + parameters: + fname: the file name of the data + Return: + anisotropy: the anisotropic object + """ + data = np.loadtxt(fname) + xyz, value = data[:, 0:3], data[:, 3] + theta, phi = np.array([cartesian_to_sphere(t) for t in xyz]).T + if method == "tensor": + return cls.fit_from_data(theta, phi, value) + elif method == "vector": + return cls.fit_from_data_vector_form(theta, phi, value) + else: + raise ValueError(f"Unknown method {method}") + + def plot_3d(self, ax=None, figname=None, show=True, surface=True): + """ + plot the anisotropic energy in all directions in 3D + S is the spin unit vector + """ + # theta, phi = np.meshgrid(theta, phi) + # value = self.energy_tensor_form(sphere_to_cartesian([theta, phi])) + # x, y, z = sphere_to_cartesian(theta, phi, ) + + if surface: + thetas = np.arange(0, 181, 1) + phis = np.arange(0, 362, 1) + + X, Y = np.meshgrid(thetas, phis) + Z = np.zeros(X.shape) + for i in range(len(thetas)): + for j in range(len(phis)): + # S = sphere_to_cartesian([thetas[i], phis[j]]) + # E = self.energy_vector_form(angle=[thetas[i], phis[j]]) + E = self.energy_tensor_form(angle=[thetas[i], phis[j]]) + Z[j, i] = E + Z = Z - np.min(Z) + imax = np.argmax(Z) + X_max, Y_max, Z_max = ( + X.flatten()[imax], + Y.flatten()[imax], + Z.flatten()[imax], + ) + imin = np.argmin(Z) + X_min, Y_min, Z_min = ( + X.flatten()[imin], + Y.flatten()[imin], + Z.flatten()[imin], + ) + + X, Y, Z = sphere_to_cartesian([X, Y], Z) + X_max, Y_max, Z_max = sphere_to_cartesian([X_max, Y_max], r=Z_max) + X_min, Y_min, Z_min = sphere_to_cartesian([X_min, Y_min], r=Z_min) + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + ax.plot_surface(X, Y, Z, cmap="viridis", alpha=0.5) + # scatter the minimal and maximal points + ax.scatter(X_max, Y_max, Z_max, color="r", marker="o") + ax.scatter(X_min, Y_min, Z_min, color="b", marker="o") + + # draw the easy axis + if self.is_easy_axis(): + color = "r" + else: + color = "b" + d = self.direction + d = d / np.sign(d[0]) + d *= np.abs(self.amplitude) * 2.5 + ax.quiver( + -d[0], + -d[1], + -d[2], + 2 * d[0], + 2 * d[1], + 2 * d[2], + color=color, + arrow_length_ratio=0.03, + ) + + else: + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + thetas = np.arange(0, 181, 5) + phis = np.arange(0, 360, 5) + Es = [] + angles = [] + for t in thetas: + for p in phis: + # S = sphere_to_cartesian([t, p]) + angles.append([t, p]) + Es.append( + self.energy_tensor_form(angle=[t, p], include_isotropic=False) + ) + # x, y, z = sphere_to_cartesian([t, p], r=E) + # ax.scatter(x,y, z, cmap='viridis') + angles = np.array(angles) + # 
plot_3D_scatter(angles.T, Es-np.min(Es), ax=None) + plot_3D_scatter(angles.T, Es, ax=None) + if figname is not None: + plt.savefig(figname) + plt.close() + if show: + plt.show() + + def plot_contourf(self, ax=None, figname=None, show=False): + if ax is None: + fig, ax = plt.subplots() + X, Y = np.meshgrid(np.arange(0, 180, 1), np.arange(-180, 180, 1)) + Z = np.zeros(X.shape) + ntheta, nphi = X.shape + for i in range(ntheta): + for j in range(nphi): + E = self.energy_tensor_form(angle=[X[i, j], Y[i, j]]) + Z[i, j] = E + # find the X, Y for min and max of Z + X_max, Y_max = np.unravel_index(np.argmax(Z), Z.shape) + X_min, Y_min = np.unravel_index(np.argmin(Z), Z.shape) + X_max, Y_max = X[X_max, Y_max], Y[X_max, Y_max] + X_min, Y_min = X[X_min, Y_min], Y[X_min, Y_min] + c = ax.contourf(X, Y, Z, cmap="viridis", levels=200) + # print(X_max, Y_max, X_min, Y_min) + # ax.scatter(X_max, Y_max, color="r", marker="o") + # ax.scatter(X_min, Y_min, color="b", marker="o") + ax.set_xlabel("$\theta$ (degree)") + ax.set_ylabel("$\phi$ degree") + # ax.scatter(X_max, Y_max, color="r", marker="o") + # ax.scatter(X_min, Y_min, color="r", marker="o") + + # colorbar + _cbar = plt.colorbar(c, ax=ax) + + if figname is not None: + plt.savefig(figname) + plt.close() + if show: + print(f"Max = {X_max}, {Y_max}, Min = {X_min}, {Y_min}") + plt.show() + return ax + + def tensor_strings(self, include_isotropic=False, multiplier=1): + """ + convert the energy tensor to strings for easy printing + parameters: + include_isotropic: if include the isotropic part + multiplier: the multiplier for the tensor. Use for scaling the tensor by units. + """ + if include_isotropic: + T = self.T + else: + T = self.T + strings = np.array2string(T * multiplier, precision=5, separator=" ") + return strings + + +def plot_3D_scatter(angles, values, ax=None, **kwargs): + if ax is None: + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + thetas, phis = angles + for i in range(len(thetas)): + E = values[i] + t, p = thetas[i], phis[i] + x, y, z = sphere_to_cartesian([t, p], r=E) + ax.scatter(x, y, z, **kwargs) + return ax + + +def plot_3D_surface(fname, figname=None, show=True): + data = np.loadtxt(fname) + theta, phi, value = data[:, 0], data[:, 1], data[:, 2] + value = value - np.min(value) + + imax = np.argmax(value) + angle_max = theta[imax], phi[imax] + imin = np.argmin(value) + angle_min = theta[imin], phi[imin] + amplitude = np.max(value) - np.min(value) + + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + x, y, z = sphere_to_cartesian([theta, phi], r=value) + # ax.plot_surface(x, y, z, triangles=None, cmap='viridis') + ax.scatter(x, y, z, cmap="viridis") + # ax.scatter(theta,phi, value, cmap='viridis') + # draw a surface plot with a color map. 
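+    # The (theta, phi, value) samples from the file are unstructured, so a 3D
+    # scatter is drawn here; plot_3D_surface_interpolation below interpolates
+    # onto a regular (theta, phi) grid first and then uses plot_surface.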
+    if figname is not None:
+        plt.savefig(figname)
+    if show:
+        plt.show()
+    plt.close()
+    return angle_max, angle_min, amplitude
+
+
+def plot_3D_surface_interpolation(fname, figname=None, show=True):
+    data = np.loadtxt(fname)
+    theta, phi, value = data[:, 0], data[:, 1], data[:, 2]
+
+    imax = np.argmax(value)
+    angle_max = theta[imax], phi[imax]
+    imin = np.argmin(value)
+    angle_min = theta[imin], phi[imin]
+    amplitude = np.max(value) - np.min(value)
+
+    value = value - np.min(value)
+    # interpolate the data
+    interp = LinearNDInterpolator((theta, phi), value)
+    thetas = np.arange(0, 181, 1)
+    phis = np.arange(0, 340, 1)
+
+    X, Y = np.meshgrid(thetas, phis)
+    Z = interp(X, Y)
+    print(Z)
+    # print(np.max(Z), np.min(Z))
+    Z = Z - np.min(Z)
+    X, Y, Z = sphere_to_cartesian([X, Y], Z)
+    fig = plt.figure()
+    ax = fig.add_subplot(111, projection="3d")
+    ax.plot_surface(X, Y, Z, cmap="viridis")
+    if figname is not None:
+        plt.savefig(figname)
+    if show:
+        plt.show()
+    plt.close()
+    return angle_max, angle_min, amplitude
+
+
+def sphere_to_cartesian(angles, r=1):
+    """
+    Transform the spherical coordinates to the cartesian coordinates
+    parameters:
+        angles: the polar and azimuthal angle in degree
+        r: the radius
+    Return:
+        x, y, z: the cartesian coordinates
+    """
+    # print(angles)
+    theta, phi = angles
+    theta = theta * np.pi / 180
+    phi = phi * np.pi / 180
+    x = r * np.sin(theta) * np.cos(phi)
+    y = r * np.sin(theta) * np.sin(phi)
+    z = r * np.cos(theta)
+    return np.array([x, y, z])
+
+
+def cartesian_to_sphere(xyz, unit="deg"):
+    """
+    Transform the cartesian coordinates to the spherical coordinates
+    parameters:
+        xyz: the cartesian coordinates
+    Return:
+        theta, phi: the polar and azimuthal angle in degree
+    """
+    x, y, z = xyz
+    r = np.linalg.norm(xyz)
+    theta = np.arccos(z / r)
+    phi = np.arctan2(y, x)
+    if unit.lower().startswith("deg"):
+        theta = theta * 180 / np.pi
+        phi = phi * 180 / np.pi
+    elif unit.lower().startswith("rad"):
+        pass
+    else:
+        raise ValueError("unit must be 'deg' or 'rad'")
+    return np.array([theta, phi])
+
+
+def anisotropy_energy(angle, Txx, Tyy, Tzz, Tyz, Tzx, Txy):
+    """
+    Calculate the anisotropic energy
+    parameters:
+        angle: the polar and azimuthal angle in degree
+        Txx, Tyy, Tzz, Tyz, Tzx, Txy: the components of the anisotropic tensor
+    Return:
+        E: the anisotropic energy
+    """
+    # transform the tensor to the matrix form
+    T = np.array([[Txx, Txy, Tzx], [Txy, Tyy, Tyz], [Tzx, Tyz, Tzz]])
+
+    S = sphere_to_cartesian(angle)
+    E = -np.diag(S.T @ T @ S)
+    return E
+
+
+def anisotropy_energy_vector_form(S, k_theta, k_phi, amplitude, isotropic_part=0):
+    """
+    Calculate the anisotropic energy from the vector form
+    parameters:
+        S: the spin orientations given as polar and azimuthal angles in degree
+        k_theta, k_phi: the polar and azimuthal angle of the anisotropy axis
+        amplitude: the amplitude of the anisotropy
+        isotropic_part: the isotropic offset added to the energy
+    """
+    Scart = sphere_to_cartesian(S)
+    k = sphere_to_cartesian([k_theta, k_phi])
+    print(f"k shape = {k.shape}")
+    print(f"Scart shape = {Scart.shape}")
+    E = [-amplitude * (Si @ k) ** 2 + isotropic_part for Si in Scart.T]
+    return E
+
+
+def T6_to_T(T6):
+    """
+    Transform the 6-component anisotropic tensor (Txx, Tyy, Tzz, Tyz, Tzx, Txy) to the 3x3 matrix form
+    """
+    Txx, Tyy, Tzz, Tyz, Tzx, Txy = T6
+    T = np.array([[Txx, Txy, Tzx], [Txy, Tyy, Tyz], [Tzx, Tyz, Tzz]])
+    return T
+
+
+def is_rank_one(T):
+    """
+    Check if the tensor is rank one
+    """
+    return matrix_rank(T) == 1
+
+
+def fit_anisotropy(thetas, phis, values):
+    """
+    Fit the anisotropic tensor to the data
+    parameters:
+        thetas: the polar angles in degree
+        phis: the azimuthal angles in degree
+        values: the anisotropic values
+    Return:
+        direction, amplitude: the anisotropy axis and amplitude extracted from the fitted tensor
+    """
+    # transform 
the data to the cartesian coordinates + # fit the data to the anisotropic energy + angles = np.vstack([thetas, phis]) + params, cov = curve_fit(anisotropy_energy, angles, values) + # check if the fitting is consistent with the data + T = T6_to_T(params) + direction, amp = anisotropy_tensor_to_vector(T) + return direction, amp + + +def anisotropy_energy_vector_form2(direction, amplitude, S): + """ + Calculate the anisotropic energy + parameters: + direction: the easy axis/ hard axis of the anisotropy + amplitude: the amplitude of the anisotropy + S: the spin vector + Return: + E: the anisotropic energy, E = - amplitude * (S @ direction)**2 + """ + # normalize the direction + direction = direction / np.linalg.norm(direction) + print(f"direction shape = {direction.shape}") + E = -amplitude * (S @ direction) ** 2 + return E + + +def anisotropy_energy_tensor_form(T, S): + """ + Calculate the anisotropic energy + parameters: + T: the anisotropic tensor + S: the spin vector + Return: + E: the anisotropic energy, E = - S.T @ T @ S + """ + E = -S.T @ T @ S + return E + + +def anisotropy_vector_to_tensor(direction, amplitude): + """ + Transform the anisotropic vector to the anisotropic tensor + parameters: + direction: the easy axis/ hard axis of the anisotropy (direction is normalized) + amplitude: the amplitude of the anisotropy + Return: + T: the anisotropic tensor + """ + direction = direction / np.linalg.norm(direction) + T = amplitude * np.outer(direction, direction) + return T + + +def anisotropy_tensor_to_vector(T): + """ + Transform the anisotropic tensor to the anisotropic vector + parameters: + T: the anisotropic tensor + Return: + direction: the easy axis/ hard axis of the anisotropy + amplitude: the amplitude of the anisotropy, if the anisotropy is positive, the easy axis is the easy axis, otherwise, the hard axis is the easy axis + """ + w, v = np.linalg.eig(T) + if not is_rank_one(T): + # print("Warning: The anisotropy tensor is not rank one. 
The tensor cannot be transformed to the vector form.")
+        # print(f"The eigenvalues are {w}.")
+        pass
+    index = np.argmax(np.abs(w))
+    direction = v[:, index]
+    direction = direction / np.sign(direction[0])
+    amplitude = w[index]
+    return direction, amplitude
+
+
+def test_anisotorpy_vector_to_tensor():
+    direction = np.random.rand(3)
+    direction = direction / np.linalg.norm(direction)
+    amplitude = random.uniform(-1, 1)
+    S = np.random.rand(3)
+    S = S / np.linalg.norm(S)
+    T = anisotropy_vector_to_tensor(direction, amplitude)
+    E_vector = anisotropy_energy_vector_form2(direction, amplitude, S)
+    E_tensor = anisotropy_energy_tensor_form(T, S)
+    diff = E_vector - E_tensor
+    print(f"diff = {diff}")
+    assert np.abs(diff) < 1e-10
+
+    # test if the inverse transformation gets the direction and amplitude back
+    dir2, amp2 = anisotropy_tensor_to_vector(T)
+
+    # set the first element of the direction to be positive
+    if direction[0] * dir2[0] < 0:
+        dir2 = -dir2
+    diff = np.linalg.norm(dir2 - direction)
+    # print(f'direction = {direction}, amplitude = {amplitude}')
+    # print(f'dir2 = {dir2}, amp2 = {amp2}')
+    assert diff < 1e-10
+    assert np.abs(amp2 - amplitude) < 1e-10
+
+
+def test_anisotropy_tensor_to_vector():
+    T = np.random.rand(3, 3)
+    T = T + T.T
+    T = T - np.trace(T) / 3 * np.eye(3)
+    direction, amplitude = anisotropy_tensor_to_vector(T)
+    T2 = anisotropy_vector_to_tensor(direction, amplitude)
+    print(f"T = {T}")
+    print(f"T2 = {T2}")
+    diff = np.linalg.norm(T - T2)
+    print(f"diff = {diff}")
+    assert diff < 1e-10
+
+
+def test_fit_anisotropy():
+    data = np.loadtxt("anisotropy.dat")
+    theta, phi, value = data[:, 0], data[:, 1], data[:, 2]
+    angles = np.vstack([theta, phi])
+    T6 = fit_anisotropy(theta, phi, value)
+    T = T6_to_T(T6)
+    if not is_rank_one(T):
+        print("Warning: The anisotropy tensor is not rank one. 
") + fitted_values = anisotropy_energy(angles, *T6) + delta = fitted_values - value + print(f"Max value = {np.max(value)}, Min value = {np.min(value)}") + print( + f"Max fitted value = {np.max(fitted_values)}, Min fitted value = {np.min(fitted_values)}" + ) + for i in range(len(theta)): + print( + f"theta = {theta[i]}, phi = {phi[i]}, value = {value[i]}, fitted value = {fitted_values[i]}, delta = {delta[i]}" + ) + + +def view_anisotropy_strain(): + strains1 = [(x, 0.0) for x in np.arange(0.000, 0.021, 0.002)] + strains2 = [(0.02, y) for y in np.arange(0.000, 0.021, 0.002)] + strains = strains1 + strains2 + path = Path("anisotropy_strain_from_tensor") + path.mkdir(exist_ok=True) + fname = "a.dat" + fh = open(fname, "w") + fh.write("# s0 s1 axis amplitude axis_type angle_111 \n") + for strain in strains: + s0, s1 = strain + ani = Anisotropy.fit_from_data_file(f"a{s0:.3f}_b{s1:.3f}_plusU.dat") + dx, dy, dz = ani.direction + angle_from_111 = ( + np.arccos(ani.direction @ np.array([1, 1, 1]) / np.sqrt(3)) * 180 / np.pi + ) + fh.write( + f"{s0:.3f} {s1:.3f} ( {dx:.3f} {dy:.3f} {dz:.3f} ) {ani.amplitude:.5f} {ani.axis_type} {angle_from_111} \n" + ) + ani.plot_3d( + surface=True, figname=path / f"a{s0:.3f}_b{s1:.3f}_plusU.png", show=False + ) + fh.close() + + +def view_anisotropy_strain_raw(): + strains1 = [(x, 0.0) for x in np.arange(0.000, 0.021, 0.002)] + strains2 = [(0.02, y) for y in np.arange(0.000, 0.021, 0.002)] + strains = strains1 + strains2 + path = Path("anisotropy_strain_raw") + path.mkdir(exist_ok=True) + fname = "a_raw.dat" + fh = open(fname, "w") + fh.write("# s0 s1 direction(max) direction(min) amplitude\n") + for strain in strains: + print(f"strain = {strain}") + s0, s1 = strain + # plot_3D_surface(f"a{s0:.3f}_b{s1:.3f}_plusU.dat") + angle_max, angle_min, amplitude = plot_3D_surface( + f"a{s0:.3f}_b{s1:.3f}_plusU.dat", + figname=path / f"a{s0:.3f}_b{s1:.3f}_plusU.png", + ) + dmax = sphere_to_cartesian(angle_max) + dmin = sphere_to_cartesian(angle_min) + fh.write( + f"{s0:.3f} {s1:.3f} ( {dmax[0]:.3f} {dmax[1]:.3f} {dmax[2]:.3f} ) ({dmin[0]:.3f} {dmin[1]:.3f} {dmin[2]:.3f}) {amplitude:.5f} \n" + ) + + plt.close() + + +if __name__ == "__main__": + # test_fit_anisotropy() + # test_anisotorpy_vector_to_tensor() + # test_anisotropy_tensor_to_vector() + # ani = Anisotropy.fit_from_data("anisotropy.dat") + # ani = Anisotropy.fit_from_data_file("a0.002_b0.000_plusU.dat") + # ani = Anisotropy.fit_from_data_file("a0.002_b0.000_plusU.dat", method='tensor') + # print(f'direction = {ani.direction}') + # ani.plot_3d() + # plot_3D_surface("a0.002_b0.000_plusU.dat") + # plot_3D_surface_interpolation("a0.020_b0.020_plusU.dat") + # plot_3D_surface("a0.020_b0.020_plusU.dat") + # plot_3D_surface("a0.020_b0.000_plusU.dat") + view_anisotropy_strain() + # view_anisotropy_strain_raw() + # s0=0.000 + # s1=0.000 + # plot_3D_surface_interpolation(f"a{s0:.3f}_b{s1:.3f}_plusU.dat", figname=None) diff --git a/TB2J/contour.py b/TB2J/contour.py index 13ba0ba..9b7fcc7 100644 --- a/TB2J/contour.py +++ b/TB2J/contour.py @@ -1,11 +1,35 @@ import numpy as np from scipy.special import roots_legendre +from TB2J.utils import simpson_nonuniform_weight + class Contour: def __init__(self, emin, emax=0.0): self.emin = emin self.emax = emax + self._weights = None + + @property + def weights(self): + if self._weights is None: + self._weights = simpson_nonuniform_weight(self.path) + return self._weights + + def integrate_func(self, func): + """ + integrate f along the path + """ + return np.dot(func(self.path), self.weights) 
+ + def integrate_values(self, values): + """ + integrate f along the path + """ + ret = 0 + for i in range(len(values)): + ret += values[i] * self.weights[i] + return ret def build_path_semicircle(self, npoints, endpoint=True): R = (self.emax - self.emin) / 2.0 diff --git a/TB2J/exchange.py b/TB2J/exchange.py index fb60962..39430b9 100644 --- a/TB2J/exchange.py +++ b/TB2J/exchange.py @@ -1,120 +1,40 @@ -from collections import defaultdict, OrderedDict import os +import pickle +from collections import OrderedDict, defaultdict + import numpy as np -from TB2J.green import TBGreen -from TB2J.pauli import pauli_block_all, pauli_block_sigma_norm, pauli_mat -from TB2J.utils import symbol_number, kmesh_to_R -from TB2J.io_exchange import SpinIO from tqdm import tqdm -from TB2J.external import p_map + from TB2J.contour import Contour -from TB2J.utils import simpson_nonuniform, trapezoidal_nonuniform, split_symbol_number +from TB2J.exchange_params import ExchangeParams +from TB2J.external import p_map +from TB2J.green import TBGreen +from TB2J.io_exchange import SpinIO +from TB2J.mycfr import CFR from TB2J.orbmap import map_orbs_matrix -import pickle - - -class ExchangeParams: - def __init__( - self, - efermi, - basis=None, - magnetic_elements=[], - include_orbs={}, - kmesh=[4, 4, 4], - emin=-15, # integration lower bound, relative to fermi energy - # integration upper bound. Should be 0 (fermi energy). But DFT codes define Fermi energy in various ways. - emax=0.05, - nz=100, - # the delta in the (i delta) in green's function to prevent divergence - exclude_orbs=[], # - ne=None, # number of electrons in Wannier function. - Rcut=None, # Rcut. - use_cache=False, - np=1, - description="", - write_density_matrix=False, - orb_decomposition=False, - output_path="TB2J_results", - ): - self.efermi = efermi - self.emin = emin - self.emax = emax - self.nz = nz - self.Rcut = Rcut - self.basis = basis - self.magnetic_elements = magnetic_elements - self.include_orbs = include_orbs - - self.exclude_orbs = exclude_orbs - self.ne = ne - self._use_cache = use_cache - self.np = np - self._kmesh = kmesh - self.orb_decomposition = orb_decomposition - self.write_density_matrix = write_density_matrix - self.description = description - self.output_path = output_path +from TB2J.pauli import pauli_block_all, pauli_block_sigma_norm +from TB2J.utils import ( + kmesh_to_R, + symbol_number, +) class Exchange(ExchangeParams): - def __init__( - self, - tbmodels, - atoms, - efermi, - basis=None, - magnetic_elements=[], - include_orbs={}, - kmesh=[4, 4, 4], - emin=-15, # integration lower bound, relative to fermi energy - # integration upper bound. Should be 0 (fermi energy). But DFT codes define Fermi energy in various ways. - emax=0.05, - nz=100, - # the delta in the (i delta) in green's function to prevent divergence - exclude_orbs=[], # - ne=None, # number of electrons in Wannier function. - Rcut=None, # Rcut. 
- use_cache=False, - np=1, - description="", - write_density_matrix=False, - output_path="TB2J_results", - orb_decomposition=False, - ): + def __init__(self, tbmodels, atoms, **params): self.atoms = atoms - super().__init__( - efermi=efermi, - basis=basis, - magnetic_elements=magnetic_elements, - include_orbs=include_orbs, - kmesh=kmesh, - emin=emin, - emax=emax, - nz=nz, - exclude_orbs=exclude_orbs, - ne=ne, - Rcut=Rcut, - use_cache=use_cache, - np=np, - description=description, - write_density_matrix=write_density_matrix, - orb_decomposition=orb_decomposition, - output_path=output_path, - ) + super().__init__(**params) self._prepare_kmesh(self._kmesh) self._prepare_Rlist() self.set_tbmodels(tbmodels) self._adjust_emin() + # self._prepare_elist(method="CFR") self._prepare_elist(method="legendre") self._prepare_basis() self._prepare_orb_dict() self._prepare_distance() # whether to calculate J and DMI with NJt method. - self.calc_NJt = True # self._prepare_NijR() - self.Ddict_NJT = None - self.Jdict_NJT = None self._is_collinear = True self.has_elistc = False self._clean_tbmodels() @@ -125,35 +45,41 @@ def _prepare_Jorb_file(self): os.makedirs(self.orbpath, exist_ok=True) def _adjust_emin(self): - self.emin = self.G.find_energy_ingap(rbound=self.efermi - 5.0) - self.efermi + self.emin = self.G.find_energy_ingap(rbound=self.efermi - 15.0) - self.efermi + # self.emin = self.G.find_energy_ingap(rbound=self.efermi - 15.0) - self.efermi + # self.emin = -42.0 print(f"A gap is found at {self.emin}, set emin to it.") def set_tbmodels(self, tbmodels): pass def _clean_tbmodels(self): - del self.tbmodel - del self.G.tbmodel + # del self.tbmodel + # del self.G.tbmodel + pass - def _prepare_kmesh(self, kmesh): + def _prepare_kmesh(self, kmesh, ibz=False): for k in kmesh: self.kmesh = list(map(lambda x: x // 2 * 2 + 1, kmesh)) - def _prepare_elist(self, method="legendre"): + def _prepare_elist(self, method="CFR"): """ prepare list of energy for integration. The path has three segments: emin --1-> emin + 1j*height --2-> emax+1j*height --3-> emax """ - self.contour = Contour(self.emin, self.emax) # if method.lower() == "rectangle": # self.contour.build_path_rectangle( # height=self.height, nz1=self.nz1, nz2=self.nz2, nz3=self.nz3 # ) if method.lower() == "semicircle": + self.contour = Contour(self.emin, self.emax) self.contour.build_path_semicircle(npoints=self.nz, endpoint=True) elif method.lower() == "legendre": + self.contour = Contour(self.emin, self.emax) self.contour.build_path_legendre(npoints=self.nz, endpoint=True) + elif method.lower() == "cfr": + self.contour = CFR(nz=self.nz, T=600) else: raise ValueError(f"The path cannot be of type {method}.") @@ -188,14 +114,20 @@ def _prepare_orb_dict(self): # e.g. 
Fe2, dxy, _, _ if isinstance(base, str): atom_sym, orb_sym = base.split("|")[:2] + iatom = sdict[atom_sym] else: - atom_sym, orb_sym = base[:2] + try: + atom_sym, orb_sym = base[:2] + iatom = sdict[atom_sym] + except Exception: + iatom = base.iatom + atom_sym = base.iatom + orb_sym = base.sym if atom_sym in adict: adict[atom_sym].append(orb_sym) else: adict[atom_sym] = [orb_sym] - iatom = sdict[atom_sym] if iatom not in self.orb_dict: self.orb_dict[iatom] = [i] self.labels[iatom] = [orb_sym] @@ -218,14 +150,15 @@ def _prepare_orb_dict(self): ) if not self._is_collinear: for iatom, orb in self.orb_dict.items(): + # print(f"iatom: {iatom}, orb: {orb}") nsorb = len(self.orb_dict[iatom]) - if nsorb % 2 != 0: + if nsorb % 2 != 0 and False: raise ValueError( f"""The number of spin-orbitals for atom {iatom} is not even, {nsorb} spin-orbitals are found near this atom. -which means the spin up/down does not have same number of orbitals. +which means the spin up/down does not have same number of orbitals. This is often because the Wannier functions are wrongly defined, -or badly localized. Please check the Wannier centers in the Wannier90 output file. +or badly localized. Please check the Wannier centers in the Wannier90 output file. """ ) self._spin_dict = {} @@ -240,7 +173,9 @@ def _prepare_orb_mmat(self): self.mmats = {} self.orbital_names = {} self.norb_reduced = {} - if self.backend_name.upper() == "SIESTA": + if self.backend_name.upper() in ["SIESTA", "ABACUS", "LCAOHAMILTONIAN"]: + # print(f"magntic_elements: {self.magnetic_elements}") + # print(f"include_orbs: {self.include_orbs}") syms = self.atoms.get_chemical_symbols() for iatom, orbs in self.labels.items(): if (self.include_orbs is not None) and syms[iatom] in self.include_orbs: @@ -250,6 +185,7 @@ def _prepare_orb_mmat(self): include_only=self.include_orbs[syms[iatom]], ) else: + # print(f"orbs: {orbs}") mmat, reduced_orbs = map_orbs_matrix( orbs, spinor=not (self._is_collinear), include_only=None ) @@ -305,7 +241,7 @@ def simplify_orbital_contributions(self, Jorbij, iatom, jatom): """ sum up the contribution of all the orbitals with same (n,l,m) """ - if self.backend_name.upper() == "SIESTA": + if self.backend_name.upper() in ["SIESTA", "ABACUS", "LCAOHAMILTONIAN"]: mmat_i = self.mmats[iatom] mmat_j = self.mmats[jatom] Jorbij = mmat_i.T @ Jorbij @ mmat_j @@ -334,14 +270,17 @@ def set_tbmodels(self, tbmodels): """ self.tbmodel = tbmodels self.backend_name = self.tbmodel.name - # TODO: check if tbmodels are really a tbmodel with SOC. self.G = TBGreen( - self.tbmodel, - self.kmesh, - self.efermi, + tbmodel=self.tbmodel, + kmesh=self.kmesh, + ibz=self.ibz, + gamma=True, + efermi=self.efermi, use_cache=self._use_cache, - nproc=self.np, + nproc=self.nproc, ) + if self.efermi is None: + self.efermi = self.G.efermi self.norb = self.G.norb self.nbasis = self.G.nbasis # self.rho = np.zeros((self.nbasis, self.nbasis), dtype=complex) @@ -349,12 +288,22 @@ def set_tbmodels(self, tbmodels): self.A_ijR_list = defaultdict(lambda: []) self.A_ijR = defaultdict(lambda: np.zeros((4, 4), dtype=complex)) self.A_ijR_orb = dict() - self.HR0 = self.G.H0 + # self.HR0 = self.tbmodel.get_H0() + if hasattr(self.tbmodel, "get_H0"): + self.HR0 = self.tbmodel.get_H0() + else: + self.HR0 = self.G.H0 self._is_collinear = False self.Pdict = {} if self.write_density_matrix: self.G.write_rho_R() + def get_MAE(self, thetas, phis): + """ + Calculate the magnetic anisotropy energy. 
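+
+        Parameters (sketch; the exact convention is not fixed here yet):
+            thetas, phis: polar and azimuthal angles of the spin quantization
+                axes at which the band energies would be compared.
+        Currently a placeholder; see the MAE class in TB2J/MAE.py for a
+        standalone calculation.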
+ """ + pass + def _prepare_NijR(self): self.N = {} for R in self.Rlist: @@ -476,7 +425,7 @@ def A_to_Jtensor_orb(self): if self.orb_decomposition: for key, val in self.A_ijR_orb.items(): R, iatom, jatom = key - Rm = tuple(-x for x in R) + # Rm = tuple(-x for x in R) # valm = self.A_ijR_orb[(Rm, jatom, iatom)] ni = self.norb_reduced[iatom] nj = self.norb_reduced[jatom] @@ -612,77 +561,39 @@ def get_rho_atom(self): self.rho_dict = rho return self.rho_dict - def calculate_DMI_NJT(self): - """ - calculate exchange and DMI with the - D(i,j) = - """ - Ddict_NJT = {} - Jdict_NJT = {} - for R in self.short_Rlist: - N = self.N[tuple(-np.array(R))] # density matrix - t = self.tbmodel.get_hamR(R) # hopping parameter - for iatom in self.ind_mag_atoms: - orbi = self.iorb(iatom) - ni = len(orbi) - for jatom in self.ind_mag_atoms: - orbj = self.iorb(jatom) - nj = len(orbj) - Nji = N[np.ix_(orbj, orbi)] - tij = t[np.ix_(orbi, orbj)] - D = np.zeros(3, dtype=float) - J = np.zeros(3, dtype=float) - for dim in range(3): - # S_i = pauli_mat(ni, dim + - # 1) #*self.rho[np.ix_(orbi, orbi)] - # S_j = pauli_mat(nj, dim + - # 1) #*self.rho[np.ix_(orbj, orbj)] - # TODO: Note that rho is complex, not the imaginary part - S_i = pauli_mat(ni, dim + 1) * self.rho[np.ix_(orbi, orbi)] - S_j = pauli_mat(nj, dim + 1) * self.rho[np.ix_(orbj, orbj)] - - # [S, t]+ = Si tij + tij Sj, where - # Si and Sj are the spin operator - # Here we do not have L operator, so J-> S - Jt = np.matmul(S_i, tij) + np.matmul(tij, S_j) - - Jtminus = np.matmul(S_i, tij) - np.matmul(tij, S_j) - # D = -1/2 Tr Nji [J, tij] - # Trace over spin and orb - D[dim] = -0.5 * np.imag(np.trace(np.matmul(Nji, Jt))) - J[dim] = -0.5 * np.imag(np.trace(np.matmul(Nji, Jtminus))) - ispin = self.ispin(iatom) - jspin = self.ispin(jatom) - Ddict_NJT[(R, ispin, jspin)] = D - Jdict_NJT[(R, ispin, jspin)] = J - self.Jdict_NJT = Jdict_NJT - self.Ddict_NJT = Ddict_NJT - return Ddict_NJT - def integrate(self, AijRs, AijRs_orb=None, method="simpson"): """ AijRs: a list of AijR, wherer AijR: array of ((nR, n, n, 4,4), dtype=complex) """ - if method == "trapezoidal": - integrate = trapezoidal_nonuniform - elif method == "simpson": - integrate = simpson_nonuniform + # if method == "trapezoidal": + # integrate = trapezoidal_nonuniform + # elif method == "simpson": + # integrate = simpson_nonuniform + # # self.rho = integrate(self.contour.path, rhoRs) for iR, R in enumerate(self.R_ijatom_dict): for iatom, jatom in self.R_ijatom_dict[R]: f = AijRs[(R, iatom, jatom)] - self.A_ijR[(R, iatom, jatom)] = integrate(self.contour.path, f) - if self.orb_decomposition: - self.A_ijR_orb[(R, iatom, jatom)] = integrate( - self.contour.path, AijRs_orb[(R, iatom, jatom)] - ) + # self.A_ijR[(R, iatom, jatom)] = integrate(self.contour.path, f) + self.A_ijR[(R, iatom, jatom)] = self.contour.integrate_values(f) - def get_AijR(self, e): - GR = self.G.get_GR(self.short_Rlist, energy=e, get_rho=False) + if self.orb_decomposition: + # self.A_ijR_orb[(R, iatom, jatom)] = integrate( + # self.contour.path, AijRs_orb[(R, iatom, jatom)] + # ) + self.contour.integrate_values(AijRs_orb[(R, iatom, jatom)]) + + def get_quantities_per_e(self, e): + Gk_all = self.G.get_Gk_all(e) + # mae = self.get_mae_kspace(Gk_all) + mae = None + # TODO: get the MAE from Gk_all + GR = self.G.get_GR(self.short_Rlist, energy=e, get_rho=False, Gk_all=Gk_all) + # TODO: define the quantities for one energy. 
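+        # (Sketch of the intended contract) All energy-resolved quantities for
+        # this contour point e are collected in a single dict, so that both the
+        # serial and the p_map parallel paths in calculate_all() can unpack
+        # them the same way, e.g. result["AijR"][(R, iatom, jatom)] gives the
+        # A_ij(R) block at this energy; "mae" stays None until the k-space MAE
+        # is implemented.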
AijR, AijR_orb = self.get_all_A(GR) - return AijR, AijR_orb + return dict(AijR=AijR, AijR_orb=AijR_orb, mae=mae) def save_AijR(self, AijRs, fname): result = dict(path=self.contour.path, AijRs=AijRs) @@ -693,6 +604,7 @@ def validate(self): """ Do some sanity check before proceding. """ + pass def calculate_all(self): """ @@ -701,40 +613,38 @@ def calculate_all(self): print("Green's function Calculation started.") AijRs = {} - AijRs_orb = {} self.validate() npole = len(self.contour.path) - if self.np > 1: - results = p_map(self.get_AijR, self.contour.path, num_cpus=self.np) + if self.nproc > 1: + results = p_map( + self.get_quantities_per_e, self.contour.path, num_cpus=self.nproc + ) else: - results = map(self.get_AijR, tqdm(self.contour.path, total=npole)) + results = map( + self.get_quantities_per_e, tqdm(self.contour.path, total=npole) + ) for i, result in enumerate(results): for iR, R in enumerate(self.R_ijatom_dict): for iatom, jatom in self.R_ijatom_dict[R]: if (R, iatom, jatom) in AijRs: - AijRs[(R, iatom, jatom)].append(result[0][R, iatom, jatom]) + AijRs[(R, iatom, jatom)].append(result["AijR"][R, iatom, jatom]) if self.orb_decomposition: AijRs_orb[(R, iatom, jatom)].append( - result[1][R, iatom, jatom] + result["AijR_orb"][R, iatom, jatom] ) else: AijRs[(R, iatom, jatom)] = [] - AijRs[(R, iatom, jatom)].append(result[0][R, iatom, jatom]) + AijRs[(R, iatom, jatom)].append(result["AijR"][R, iatom, jatom]) if self.orb_decomposition: AijRs_orb[(R, iatom, jatom)] = [] AijRs_orb[(R, iatom, jatom)].append( - result[1][R, iatom, jatom] + result["AijR_orb"][R, iatom, jatom] ) - if self.np > 1: - # executor.close() - # executor.join() - # executor.clear() - pass # self.save_AijRs(AijRs) self.integrate(AijRs, AijRs_orb) @@ -773,8 +683,6 @@ def write_output(self, path="TB2J_results"): Jani_dict=self.Jani, DMI_orb=self.DMI_orb, Jani_orb=self.Jani_orb, - NJT_Jdict=self.Jdict_NJT, - NJT_ddict=self.Ddict_NJT, biquadratic_Jdict=self.B, debug_dict=self.debug_dict, description=self.description, @@ -813,8 +721,6 @@ def write_output(self, path="TB2J_results"): distance_dict=self.distance_dict, exchange_Jdict=self.exchange_Jdict, dmi_ddict=None, - NJT_Jdict=None, - NJT_ddict=None, biquadratic_Jdict=self.B, description=self.description, ) diff --git a/TB2J/exchangeCL2.py b/TB2J/exchangeCL2.py index 8c80c5f..513bce5 100644 --- a/TB2J/exchangeCL2.py +++ b/TB2J/exchangeCL2.py @@ -1,15 +1,18 @@ """ Exchange from Green's function """ + import os from collections import defaultdict + import numpy as np -from TB2J.green import TBGreen -from TB2J.io_exchange import SpinIO from tqdm import tqdm + from TB2J.external import p_map +from TB2J.green import TBGreen +from TB2J.io_exchange import SpinIO + from .exchange import ExchangeCL -from .utils import simpson_nonuniform, trapezoidal_nonuniform class ExchangeCL2(ExchangeCL): @@ -20,18 +23,18 @@ def set_tbmodels(self, tbmodels): self.tbmodel_up, self.tbmodel_dn = tbmodels self.backend_name = self.tbmodel_up.name self.Gup = TBGreen( - self.tbmodel_up, - self.kmesh, - self.efermi, + tbmodel=self.tbmodel_up, + kmesh=self.kmesh, + efermi=self.efermi, use_cache=self._use_cache, - nproc=self.np, + nproc=self.nproc, ) self.Gdn = TBGreen( - self.tbmodel_dn, - self.kmesh, - self.efermi, + tbmodel=self.tbmodel_dn, + kmesh=self.kmesh, + efermi=self.efermi, use_cache=self._use_cache, - nproc=self.np, + nproc=self.nproc, ) if self.write_density_matrix: self.Gup.write_rho_R( @@ -221,24 +224,30 @@ def finalize(self): # shutil.rmtree(path) def integrate(self, method="simpson"): - if 
method == "trapezoidal": - integrate = trapezoidal_nonuniform - elif method == "simpson": - integrate = simpson_nonuniform + # if method == "trapezoidal": + # integrate = trapezoidal_nonuniform + # elif method == "simpson": + # integrate = simpson_nonuniform for R, ijpairs in self.R_ijatom_dict.items(): for iatom, jatom in ijpairs: - self.Jorb[(R, iatom, jatom)] = integrate( - self.contour.path, self.Jorb_list[(R, iatom, jatom)] + # self.Jorb[(R, iatom, jatom)] = integrate( + # self.contour.path, self.Jorb_list[(R, iatom, jatom)] + # ) + # self.JJ[(R, iatom, jatom)] = integrate( + # self.contour.path, self.JJ_list[(R, iatom, jatom)] + # ) + self.Jorb[(R, iatom, jatom)] = self.contour.integrate_values( + self.Jorb_list[(R, iatom, jatom)] ) - self.JJ[(R, iatom, jatom)] = integrate( - self.contour.path, self.JJ_list[(R, iatom, jatom)] + self.JJ[(R, iatom, jatom)] = self.contour.integrate_values( + self.JJ_list[(R, iatom, jatom)] ) - def get_AijR(self, e): + def get_quantities_per_e(self, e): GR_up = self.Gup.get_GR(self.short_Rlist, energy=e, get_rho=False) GR_dn = self.Gdn.get_GR(self.short_Rlist, energy=e, get_rho=False) Jorb_list, JJ_list = self.get_all_A(GR_up, GR_dn) - return Jorb_list, JJ_list + return dict(Jorb_list=Jorb_list, JJ_list=JJ_list) def calculate_all(self): """ @@ -247,21 +256,24 @@ def calculate_all(self): print("Green's function Calculation started.") npole = len(self.contour.path) - if self.np == 1: - results = map(self.get_AijR, tqdm(self.contour.path, total=npole)) + if self.nproc == 1: + results = map( + self.get_quantities_per_e, tqdm(self.contour.path, total=npole) + ) else: - # pool = ProcessPool(nodes=self.np) + # pool = ProcessPool(nodes=self.nproc) # results = pool.map(self.get_AijR_rhoR ,self.contour.path) - results = p_map(self.get_AijR, self.contour.path, num_cpus=self.np) + results = p_map( + self.get_quantities_per_e, self.contour.path, num_cpus=self.nproc + ) for i, result in enumerate(results): - Jorb_list, JJ_list = result + Jorb_list = result["Jorb_list"] + JJ_list = result["JJ_list"] for iR, R in enumerate(self.R_ijatom_dict): for iatom, jatom in self.R_ijatom_dict[R]: key = (R, iatom, jatom) self.Jorb_list[key].append(Jorb_list[key]) self.JJ_list[key].append(JJ_list[key]) - if self.np > 1: - pass self.integrate() self.get_rho_atom() self.A_to_Jtensor() diff --git a/TB2J/exchange_params.py b/TB2J/exchange_params.py new file mode 100644 index 0000000..a0a6129 --- /dev/null +++ b/TB2J/exchange_params.py @@ -0,0 +1,253 @@ +import argparse +from dataclasses import dataclass + +import yaml + +__all__ = ["ExchangeParams", "add_exchange_args_to_parser", "parser_argument_to_dict"] + + +@dataclass +class ExchangeParams: + """ + A class to store the parameters for exchange calculation. 
+ """ + + efermi: float + basis = [] + magnetic_elements = [] + include_orbs = {} + _kmesh = [4, 4, 4] + emin: float = -15 + emax: float = 0.05 + nz: int = 100 + exclude_orbs = [] + ne: int = 0 + Rcut: float = None + _use_cache: bool = False + nproc: int = 1 + description: str = "" + write_density_matrix: bool = False + orb_decomposition: bool = False + output_path: str = "TB2J_results" + mae_angles = None + orth = False + ibz = False + + def __init__( + self, + efermi=-10.0, + basis=None, + magnetic_elements=None, + include_orbs=None, + kmesh=[4, 4, 4], + emin=-15, + emax=0.00, + nz=100, + ne=None, + Rcut=None, + use_cache=False, + nproc=1, + description="", + write_density_matrix=False, + orb_decomposition=False, + output_path="TB2J_results", + exclude_orbs=[], + mae_angles=None, + orth=False, + ibz=False, + ): + self.efermi = efermi + self.basis = basis + # self.magnetic_elements = magnetic_elements + # self.include_orbs = include_orbs + self.magnetic_elements, self.include_orbs = self.set_magnetic_elements( + magnetic_elements, include_orbs + ) + self._kmesh = kmesh + self.emin = emin + self.emax = emax + self.nz = nz + self.exclude_orbs = exclude_orbs + self.ne = ne + self.Rcut = Rcut + self._use_cache = use_cache + self.nproc = nproc + self.description = description + self.write_density_matrix = write_density_matrix + self.orb_decomposition = orb_decomposition + self.output_path = output_path + self.mae_angles = mae_angles + self.orth = orth + self.ibz = ibz + + def set_params(self, **kwargs): + for key, val in kwargs.items(): + setattr(self, key, val) + + def save_to_yaml(self, fname): + with open(fname, "w") as myfile: + yaml.dump(self.__dict__, myfile) + + def set_magnetic_elements(self, magnetic_elements, include_orbs): + # magnetic_elements = exargs.pop("magnetic_elements") + # include_orbs = exargs.pop("include_orbs") + if include_orbs is None: + include_orbs = {} + if isinstance(magnetic_elements, str): + magnetic_elements = [magnetic_elements] + for element in magnetic_elements: + if "_" in element: + elem = element.split("_")[0] + orb = element.split("_")[1:] + include_orbs[elem] = orb + else: + include_orbs[element] = None + + magnetic_elements = list(include_orbs.keys()) + return magnetic_elements, include_orbs + + +def add_exchange_args_to_parser(parser: argparse.ArgumentParser): + parser.add_argument( + "--elements", + help="elements to be considered in Heisenberg model", + default=None, + type=str, + nargs="*", + ) + + parser.add_argument( + "--spinor", + help="whether the Wannier functions are spinor. Default: False", + action="store_true", + default=False, + ) + + parser.add_argument( + "--rcut", + help="cutoff of spin pair distance. The default is to calculate all commensurate R point to the k mesh.", + default=None, + type=float, + ) + parser.add_argument("--efermi", help="Fermi energy in eV", default=None, type=float) + parser.add_argument( + "--ne", + help="number of electrons in the unit cell. 
If not given, TB2J will use the fermi energy to compute it.", + ) + parser.add_argument( + "--kmesh", + help="kmesh in the format of kx ky kz", + type=int, + nargs="*", + default=[5, 5, 5], + ) + parser.add_argument( + "--emin", + help="energy minimum below efermi, default -14 eV", + type=float, + default=-14.0, + ) + parser.add_argument( + "--emax", + help="energy maximum above efermi, default 0.0 eV", + type=float, + default=0.0, + ) + parser.add_argument( + "--nz", + help="number of steps for semicircle contour, default: 100", + default=100, + type=int, + ) + parser.add_argument( + "--cutoff", + help="The minimum of J amplitude to write, (in eV), default is 1e-5 eV", + default=1e-5, + type=float, + ) + parser.add_argument( + "--exclude_orbs", + help="the indices of wannier functions to be excluded from magnetic site. counting start from 0", + default=[], + type=int, + nargs="+", + ) + + parser.add_argument( + "--np", + "--nproc", + help="number of cpu cores to use in parallel, default: 1", + default=1, + type=int, + ) + + parser.add_argument( + "--use_cache", + help="whether to use disk file for temporary storing wavefunctions and hamiltonian to reduce memory usage. Default: False", + action="store_true", + default=False, + ) + + parser.add_argument( + "--description", + help="add description of the calculatiion to the xml file. Essential information, like the xc functional, U values, magnetic state should be given.", + type=str, + default="Calculated with TB2J.", + ) + + parser.add_argument( + "--orb_decomposition", + default=False, + action="store_true", + help="whether to do orbital decomposition in the non-collinear mode.", + ) + + parser.add_argument( + "--output_path", + help="The path of the output directory, default is TB2J_results", + type=str, + default="TB2J_results", + ) + parser.add_argument( + "--write_dm", + help="whether to write density matrix", + action="store_true", + default=False, + ) + + parser.add_argument( + "--orth", + help="whether to use lowdin orthogonalization before diagonalization (for testing only)", + action="store_true", + default=False, + ) + + parser.add_argument( + "--ibz", + help=" use irreducible k-points in the Brillouin zone. 
(Note: only for computing total MAE).", + action="store_true", + default=False, + ) + + return parser + + +def parser_argument_to_dict(args) -> dict: + return { + "efermi": args.efermi, + "magnetic_elements": args.elements, + "kmesh": args.kmesh, + "emin": args.emin, + "emax": args.emax, + "nz": args.nz, + "exclude_orbs": args.exclude_orbs, + "ne": args.ne, + "Rcut": args.rcut, + "use_cache": args.use_cache, + "nproc": args.np, + "description": args.description, + "write_density_matrix": args.write_dm, + "orb_decomposition": args.orb_decomposition, + "output_path": args.output_path, + "orth": args.orth, + } diff --git a/TB2J/green.py b/TB2J/green.py index 7862311..ebc16a7 100644 --- a/TB2J/green.py +++ b/TB2J/green.py @@ -1,18 +1,34 @@ -import numpy as np -from collections import defaultdict -from ase.dft.kpoints import monkhorst_pack -from shutil import rmtree +import copy import os +import pickle +import sys import tempfile +from collections import defaultdict +from shutil import rmtree + +import numpy as np +from HamiltonIO.model.occupations import GaussOccupations +from HamiltonIO.model.occupations import myfermi as fermi from pathos.multiprocessing import ProcessPool -import sys -import pickle -import warnings -from TB2J.mathutils.fermi import fermi + +from TB2J.kpoints import ir_kpts, monkhorst_pack + +# from TB2J.mathutils.fermi import fermi MAX_EXP_ARGUMENT = np.log(sys.float_info.max) +def eigen_to_G2(H, S, efermi, energy): + """calculate green's function from eigenvalue/eigenvector for energy(e-ef): G(e-ef). + :param H: Hamiltonian matrix in eigenbasis + :param S: Overlap matrix in eigenbasis + :param efermi: fermi energy + :param energy: energy level e - efermi + """ + # G = ((E+Ef) S - H)^-1 + return np.linalg.inv((energy + efermi) * S - H) + + def eigen_to_G(evals, evecs, efermi, energy): """calculate green's function from eigenvalue/eigenvector for energy(e-ef): G(e-ef). :param evals: eigen values @@ -22,13 +38,20 @@ def eigen_to_G(evals, evecs, efermi, energy): :returns: Green's function G, :rtype: Matrix with same shape of the Hamiltonian (and eigenvector) """ - return ( - np.einsum("ij, j-> ij", evecs, 1.0 / (-evals + (energy + efermi))) - @ evecs.conj().T + # return ( + # np.einsum("ij, j-> ij", evecs, 1.0 / (-evals + (energy + efermi))) + # @ evecs.conj().T + # ) + return np.einsum( + "ib, b, jb-> ij", + evecs, + 1.0 / (-evals + (energy + efermi)), + evecs.conj(), + optimize=True, ) -def find_energy_ingap(evals, rbound, gap=4.0): +def find_energy_ingap(evals, rbound, gap=2.0): """ find a energy inside a gap below rbound (right bound), return the energy gap top - 0.5. 
@@ -46,8 +69,12 @@ class TBGreen: def __init__( self, tbmodel, - kmesh, # [ikpt, 3] - efermi, # efermi + kmesh=None, # [ikpt, 3] + ibz=False, # if True, will interpolate the Green's function at Ir-kpoints + efermi=None, # efermi + gamma=False, + kpts=None, + kweights=None, k_sym=False, use_cache=False, cache_path=None, @@ -67,18 +94,64 @@ def __init__( self.cache_path = cache_path if use_cache: self._prepare_cache() - if kmesh is not None: - self.kpts = monkhorst_pack(size=kmesh) - else: - self.kpts = tbmodel.get_kpts() - self.nkpts = len(self.kpts) - self.kweights = [1.0 / self.nkpts] * self.nkpts + self.prepare_kpts( + kmesh=kmesh, + ibz=ibz, + gamma=gamma, + kpts=kpts, + kweights=kweights, + tbmodel=tbmodel, + ) + self.norb = tbmodel.norb self.nbasis = tbmodel.nbasis self.k_sym = k_sym self.nproc = nproc self._prepare_eigen() + def prepare_kpts( + self, kmesh=None, gamma=True, ibz=False, kpts=None, kweights=None, tbmodel=None + ): + """ + Prepare the k-points for the calculation. + + Parameters: + - kmesh (tuple): The k-mesh used to generate k-points. + - gamma (bool): Whether to include the gamma point in the k-points. + - ibz (bool): Whether to use the irreducible Brillouin zone. + - kpts (list): List of user-defined k-points. + - kweights (list): List of weights for each k-point. + + Returns: + None + """ + if kpts is not None: + self.kpts = kpts + self.nkpts = len(self.kpts) + self.kweights = kweights + elif kmesh is not None: + if ibz: + self.kpts, self.kweights = ir_kpts( + atoms=tbmodel.atoms, + mp_grid=kmesh, + ir=True, + is_time_reversal=False, + ) + self.nkpts = len(self.kpts) + print(f"Using IBZ of kmesh of {kmesh}") + print(f"Number of kpts: {self.nkpts}") + for kpt, weight in zip(self.kpts, self.kweights): + # format the kpt and weight, use 5 digits + print(f"{kpt[0]:8.5f} {kpt[1]:8.5f} {kpt[2]:8.5f} {weight:8.5f}") + else: + self.kpts = monkhorst_pack(kmesh, gamma_center=gamma) + self.nkpts = len(self.kpts) + self.kweights = np.array([1.0 / self.nkpts] * self.nkpts) + else: + self.kpts = tbmodel.get_kpts() + self.nkpts = len(self.kpts) + self.kweights = np.array([1.0 / self.nkpts] * self.nkpts) + def _reduce_eigens(self, evals, evecs, emin, emax): ts = np.logical_and(evals >= emin, evals < emax) ts = np.any(ts, axis=0) @@ -90,7 +163,7 @@ def _reduce_eigens(self, evals, evecs, emin, emax): istart, iend = ts[0], ts[-1] + 1 return evals[:, istart:iend], evecs[:, :, istart:iend] - def find_energy_ingap(self, rbound, gap=4.0): + def find_energy_ingap(self, rbound, gap=2.0): return find_energy_ingap(self.evals, rbound, gap) def _prepare_cache(self): @@ -145,9 +218,26 @@ def _prepare_eigen(self, solve=True, saveH=False): if not saveH: self.H = None - self.evals, self.evecs = self._reduce_eigens( - self.evals, self.evecs, emin=self.efermi - 10.0, emax=self.efermi + 10.1 - ) + # get efermi + if self.efermi is None: + print("Calculating Fermi energy from eigenvalues") + print(f"Number of electrons: {self.tbmodel.nel} ") + + # occ = Occupations( + # nel=self.tbmodel.nel, width=0.1, wk=self.kweights, nspin=2 + # ) + # self.efermi = occ.efermi(copy.deepcopy(self.evals)) + # print(f"Fermi energy found: {self.efermi}") + + occ = GaussOccupations( + nel=self.tbmodel.nel, width=0.1, wk=self.kweights, nspin=2 + ) + self.efermi = occ.efermi(copy.deepcopy(self.evals)) + print(f"Fermi energy found: {self.efermi}") + + # self.evals, self.evecs = self._reduce_eigens( + # self.evals, self.evecs, emin=self.efermi - 15.0, emax=self.efermi + 10.1 + # ) if self._use_cache: evecs = self.evecs 
self.evecs_shape = self.evecs.shape @@ -216,20 +306,22 @@ def get_density_matrix(self): rho = np.zeros((self.nbasis, self.nbasis), dtype=complex) if self.is_orthogonal: for ik, _ in enumerate(self.kpts): + evecs_k = self.get_evecs(ik) + # chekc if any of the evecs element is nan rho += ( - (self.get_evecs(ik) * fermi(self.evals[ik], self.efermi)) - @ self.get_evecs(ik).T.conj() + (evecs_k * fermi(self.evals[ik], self.efermi, nspin=2)) + @ evecs_k.T.conj() * self.kweights[ik] ) else: for ik, _ in enumerate(self.kpts): rho += ( - (self.get_evecs(ik) * fermi(self.evals[ik], self.efermi)) + (self.get_evecs(ik) * fermi(self.evals[ik], self.efermi, nspin=2)) @ self.get_evecs(ik).T.conj() @ self.get_Sk(ik) * self.kweights[ik] ) - + # check if rho has nan values return rho def get_rho_R(self, Rlist): @@ -238,7 +330,10 @@ def get_rho_R(self, Rlist): for ik, kpt in enumerate(self.kpts): evec = self.get_evecs(ik) rhok = np.einsum( - "ib,b, bj-> ij", evec, fermi(self.evals[ik], self.efermi), evec.conj().T + "ib,b, bj-> ij", + evec, + fermi(self.evals[ik], self.efermi, nspin=2), + evec.conj().T, ) for iR, R in enumerate(Rlist): rho_R[iR] += rhok * np.exp(self.k2Rfactor * kpt @ R) * self.kweights[ik] @@ -252,16 +347,20 @@ def write_rho_R(self, Rlist, fname="rhoR.pickle"): def get_density(self): return np.real(np.diag(self.get_density_matrix())) - def get_Gk(self, ik, energy): + def get_Gk(self, ik, energy, evals=None, evecs=None): """Green's function G(k) for one energy G(\epsilon)= (\epsilon I- H)^{-1} :param ik: indices for kpoint :returns: Gk :rtype: a matrix of indices (nbasis, nbasis) """ + if evals is None: + evals = self.get_evalue(ik) + if evecs is None: + evecs = self.get_evecs(ik) Gk = eigen_to_G( - evals=self.get_evalue(ik), - evecs=self.get_evecs(ik), + evals=evals, + evecs=evecs, efermi=self.efermi, energy=energy, ) @@ -270,7 +369,14 @@ def get_Gk(self, ik, energy): # Gk = np.linalg.inv((energy+self.efermi)*self.S[ik,:,:] - self.H[ik,:,:]) return Gk - def get_GR(self, Rpts, energy, get_rho=False): + def get_Gk_all(self, energy): + """Green's function G(k) for one energy for all kpoints""" + Gk_all = np.zeros((self.nkpts, self.nbasis, self.nbasis), dtype=complex) + for ik, _ in enumerate(self.kpts): + Gk_all[ik] = self.get_Gk(ik, energy) + return Gk_all + + def get_GR(self, Rpts, energy, get_rho=False, Gk_all=None): """calculate real space Green's function for one energy, all R points. G(R, epsilon) = G(k, epsilon) exp(-2\pi i R.dot. 
k) :param Rpts: R points @@ -282,7 +388,10 @@ def get_GR(self, Rpts, energy, get_rho=False): GR = defaultdict(lambda: 0.0j) rhoR = defaultdict(lambda: 0.0j) for ik, kpt in enumerate(self.kpts): - Gk = self.get_Gk(ik, energy) + if Gk_all is None: + Gk = self.get_Gk(ik, energy) + else: + Gk = Gk_all[ik] if get_rho: if self.is_orthogonal: rhok = Gk @@ -315,7 +424,6 @@ def get_GR_and_dGRdx1(self, Rpts, energy, dHdx): for iR, R in enumerate(Rpts): phase = np.exp(self.k2Rfactor * np.dot(R, kpt)) GR[R] += Gkw * (phase * self.kweights[ik]) - dHRdx = dHdx.get_hamR(R) dGRdx[R] += Gkw @ dHRdx @ Gk # dGRdx[R] += Gk.dot(dHRdx).dot(Gkp) diff --git a/TB2J/interfaces/__init__.py b/TB2J/interfaces/__init__.py new file mode 100644 index 0000000..aa5982f --- /dev/null +++ b/TB2J/interfaces/__init__.py @@ -0,0 +1,12 @@ +from .abacus import gen_exchange_abacus +from .manager import Manager +from .siesta_interface import gen_exchange_siesta +from .wannier90_interface import WannierManager, gen_exchange + +__all__ = [ + "Manager", + "gen_exchange_siesta", + "WannierManager", + "gen_exchange", + "gen_exchange_abacus", +] diff --git a/TB2J/abacus/.gitignore b/TB2J/interfaces/abacus/.gitignore similarity index 100% rename from TB2J/abacus/.gitignore rename to TB2J/interfaces/abacus/.gitignore diff --git a/TB2J/interfaces/abacus/__init__.py b/TB2J/interfaces/abacus/__init__.py new file mode 100644 index 0000000..418a6a3 --- /dev/null +++ b/TB2J/interfaces/abacus/__init__.py @@ -0,0 +1,4 @@ +from .abacus_wrapper import AbacusParser, AbacusWrapper +from .gen_exchange_abacus import gen_exchange_abacus + +__all__ = ["AbacusWrapper", "AbacusParser", "gen_exchange_abacus"] diff --git a/TB2J/abacus/abacus_api.py b/TB2J/interfaces/abacus/abacus_api.py similarity index 99% rename from TB2J/abacus/abacus_api.py rename to TB2J/interfaces/abacus/abacus_api.py index 48076a5..bd83745 100644 --- a/TB2J/abacus/abacus_api.py +++ b/TB2J/interfaces/abacus/abacus_api.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- -import numpy as np -from scipy import sparse -from scipy.sparse import csr_matrix import re import struct +import numpy as np +from scipy.sparse import csr_matrix + class XR_matrix: def __init__(self, nspin, XR_fileName): diff --git a/TB2J/abacus/abacus_wrapper.py b/TB2J/interfaces/abacus/abacus_wrapper.py similarity index 72% rename from TB2J/abacus/abacus_wrapper.py rename to TB2J/interfaces/abacus/abacus_wrapper.py index 10dde17..5fd189d 100644 --- a/TB2J/abacus/abacus_wrapper.py +++ b/TB2J/interfaces/abacus/abacus_wrapper.py @@ -3,21 +3,29 @@ """ The abacus wrapper """ -from pathlib import Path + import os +from pathlib import Path + import numpy as np from scipy.linalg import eigh -from TB2J.utils import symbol_number_list + +from TB2J.mathutils.rotate_spin import rotate_Matrix_from_z_to_spherical from TB2J.myTB import AbstractTB -from TB2J.abacus.abacus_api import read_HR_SR -from TB2J.abacus.orbital_api import parse_abacus_orbital -from TB2J.abacus.stru_api import read_abacus, read_abacus_out +from TB2J.utils import symbol_number_list + +from .abacus_api import read_HR_SR +from .orbital_api import parse_abacus_orbital +from .stru_api import read_abacus class AbacusWrapper(AbstractTB): - def __init__(self, HR, SR, Rlist, nbasis, nspin=1): + def __init__( + self, HR, SR, Rlist, nbasis, nspin=1, HR_soc=None, HR_nosoc=None, nel=None + ): self.R2kfactor = 2j * np.pi self.is_orthogonal = False + self.split_soc = False self._name = "ABACUS" self._HR = HR self.SR = SR @@ -25,11 +33,38 @@ def __init__(self, HR, SR, Rlist, nbasis, 
nspin=1): self.nbasis = nbasis self.nspin = nspin self.norb = nbasis * nspin + self.nel = nel self._build_Rdict() + if HR_soc is not None: + self.set_HR_soc(HR_soc=HR_soc, HR_nosoc=HR_nosoc, HR_full=HR) + self.soc_rotation_angle = 0.0 + + def set_HR_soc(self, HR_soc=None, HR_nosoc=None, HR_full=None): + self.split_soc = True + self.HR_soc = HR_soc + if HR_nosoc is not None: + self.HR_nosoc = HR_nosoc + if HR_full is not None: + self.HR_nosoc = HR_full - HR_soc + + def set_Hsoc_rotation_angle(self, angle): + """ + Set the rotation angle for SOC part of Hamiltonian + """ + self.soc_rotation_angle = angle @property def HR(self): - return self._HR + if self.split_soc: + _HR = np.zeros_like(self.HR_soc) + for iR, _ in enumerate(self.Rlist): + theta, phi = self.soc_rotation_angle + _HR[iR] = self.HR_nosoc[iR] + rotate_Matrix_from_z_to_spherical( + self.HR_soc[iR], theta, phi + ) + return _HR + else: + return self._HR @HR.setter def set_HR(self, HR): @@ -124,6 +159,9 @@ def __init__(self, spin=None, outpath=None, binary=False): # read the information self.read_atoms() self.efermi = self.read_efermi() + self.nel = self.read_nel() + print(f"efermi: {self.efermi}") + print(f"nel: {self.nel}") self.read_basis() def read_spin(self): @@ -154,6 +192,7 @@ def read_atoms(self): def read_basis(self): fname = str(Path(self.outpath) / "Orbital") self.basis = parse_abacus_orbital(fname) + print(self.basis) return self.basis def read_HSR_collinear(self, binary=None): @@ -214,6 +253,20 @@ def read_efermi(self): raise ValueError(f"EFERMI not found in the {str(fname)} file.") return efermi + def read_nel(self): + """ + Reading the number of electrons from the scf log file. + """ + fname = str(Path(self.outpath) / "running_scf.log") + nel = None + with open(fname, "r") as myfile: + for line in myfile: + if "number of electrons" in line: + nel = float(line.split()[-1]) + if nel is None: + raise ValueError(f"number of electron not found in the {str(fname)} file.") + return nel + def get_basis(self): slist = symbol_number_list(self.atoms) if self.spin == "collinear": @@ -231,6 +284,43 @@ def get_basis(self): return basis +class AbacusSplitSOCParser: + """ + Abacus parser with Hamiltonian split to SOC and non-SOC parts + """ + + def __init__(self, outpath_nosoc=None, outpath_soc=None, binary=False): + self.outpath_nosoc = outpath_nosoc + self.outpath_soc = outpath_soc + self.binary = binary + self.parser_nosoc = AbacusParser(outpath=outpath_nosoc, binary=binary) + self.parser_soc = AbacusParser(outpath=outpath_soc, binary=binary) + spin1 = self.parser_nosoc.read_spin() + spin2 = self.parser_soc.read_spin() + if spin1 != "noncollinear" or spin2 != "noncollinear": + raise ValueError("Spin should be noncollinear") + + def parse(self): + nbasis, Rlist, HR_nosoc, SR = self.parser_nosoc.Read_HSR_noncollinear() + nbasis2, Rlist2, HR2, SR2 = self.parser_soc.Read_HSR_noncollinear() + # print(HR[0]) + HR_soc = HR2 - HR_nosoc + model = AbacusWrapper( + HR=None, + SR=SR, + Rlist=Rlist, + nbasis=nbasis, + nspin=2, + HR_soc=HR_soc, + HR_nosoc=HR_nosoc, + nel=self.parser_nosoc.nel, + ) + model.efermi = self.parser_soc.efermi + model.basis = self.parser_nosoc.basis + model.atoms = self.parser_nosoc.atoms + return model + + def test_abacus_wrapper_collinear(): outpath = "/Users/hexu/projects/TB2J_abacus/abacus-tb2j-master/abacus_example/case_Fe/1_no_soc/OUT.Fe" parser = AbacusParser(outpath=outpath, spin=None, binary=False) @@ -239,20 +329,20 @@ def test_abacus_wrapper_collinear(): # parser.read_HSR_collinear() model_up, model_dn = 
parser.get_models() H, S, E, V = model_up.HSE_k([0, 0, 0]) - # print(H.shape) # print(H.diagonal().real) # print(model_up.get_HR0().diagonal().real) print(parser.efermi) + print(atoms) def test_abacus_wrapper_ncl(): outpath = "/Users/hexu/projects/TB2J_abacus/abacus-tb2j-master/abacus_example/case_Fe/2_soc/OUT.Fe" - parser = AbacusParser(outpath=outpath, spin=None, binary=False) atoms = parser.read_atoms() model = parser.get_models() H, S, E, V = model.HSE_k([0, 0, 0]) print(parser.efermi) + print(atoms) if __name__ == "__main__": diff --git a/TB2J/abacus/gen_exchange_abacus.py b/TB2J/interfaces/abacus/gen_exchange_abacus.py similarity index 88% rename from TB2J/abacus/gen_exchange_abacus.py rename to TB2J/interfaces/abacus/gen_exchange_abacus.py index 2a0e45d..0a81219 100644 --- a/TB2J/abacus/gen_exchange_abacus.py +++ b/TB2J/interfaces/abacus/gen_exchange_abacus.py @@ -6,8 +6,11 @@ import os from pathlib import Path -from TB2J.abacus.abacus_wrapper import AbacusParser -from TB2J.exchange import ExchangeNCL, ExchangeCL + +# from TB2J.abacus.abacus_wrapper import AbacusParser +from HamiltonIO.abacus import AbacusParser + +from TB2J.exchange import ExchangeNCL from TB2J.exchangeCL2 import ExchangeCL2 @@ -24,7 +27,7 @@ def gen_exchange_abacus( exclude_orbs=[], Rcut=None, use_cache=False, - np=1, + nproc=1, output_path="TB2J_results", orb_decomposition=False, description=None, @@ -57,14 +60,15 @@ def gen_exchange_abacus( nz=nz, exclude_orbs=exclude_orbs, Rcut=Rcut, - np=np, + nproc=nproc, use_cache=use_cache, output_path=output_path, + orb_decomposition=orb_decomposition, description=description, ) exchange.run(path=output_path) print("\n") - print(f"All calculation finsihed. The results are in {output_path} directory.") + print(f"All calculation finished. 
The results are in {output_path} directory.") else: tbmodel = parser.get_models() print("Starting to calculate exchange.") @@ -83,8 +87,9 @@ def gen_exchange_abacus( nz=nz, exclude_orbs=exclude_orbs, Rcut=Rcut, - np=np, + nproc=nproc, use_cache=use_cache, + orb_decomposition=orb_decomposition, description=description, ) exchange.run() diff --git a/TB2J/abacus/orbital_api.py b/TB2J/interfaces/abacus/orbital_api.py similarity index 86% rename from TB2J/abacus/orbital_api.py rename to TB2J/interfaces/abacus/orbital_api.py index dc043f3..97605d7 100644 --- a/TB2J/abacus/orbital_api.py +++ b/TB2J/interfaces/abacus/orbital_api.py @@ -3,11 +3,11 @@ """ Parser for the abacus orbital file """ -from pathlib import Path -import numpy as np + from dataclasses import dataclass -from collections import namedtuple -from TB2J.utils import symbol_number, symbol_number_list +from pathlib import Path + +from TB2J.utils import symbol_number_list @dataclass @@ -16,13 +16,14 @@ class AbacusOrbital: Orbital class """ - iatom: int - sym: str - spin: int - element: str - l: int - m: int - z: int + iatom: int = 0 + sym: str = "" + spin: int = 0 + element: str = "" + n: int = 0 + l: int = 0 + m: int = 0 + z: int = 0 def parse_abacus_orbital(fname): @@ -38,10 +39,11 @@ def parse_abacus_orbital(fname): iatom, element, l, m, z, sym = seg iatom = int(iatom) ispin = 0 + n = 0 l = int(l) m = int(m) z = int(z) - orbs.append(AbacusOrbital(iatom, sym, ispin, element, l, m, z)) + orbs.append(AbacusOrbital(iatom, sym, ispin, element, n, l, m, z)) line = myfile.readline() return orbs diff --git a/TB2J/abacus/stru_api.py b/TB2J/interfaces/abacus/stru_api.py similarity index 99% rename from TB2J/abacus/stru_api.py rename to TB2J/interfaces/abacus/stru_api.py index 1b00c3f..18900c5 100644 --- a/TB2J/abacus/stru_api.py +++ b/TB2J/interfaces/abacus/stru_api.py @@ -8,17 +8,17 @@ @author: Ji Yu-yang """ -import re -import warnings -import numpy as np import os +import re import shutil +import warnings from pathlib import Path +import numpy as np from ase import Atoms -from ase.units import Bohr, Hartree, GPa, mol, _me, Rydberg -from ase.utils import lazymethod, lazyproperty, reader, writer from ase.calculators.singlepoint import SinglePointDFTCalculator, arrays_to_kpoints +from ase.units import Bohr, GPa, Hartree, Rydberg, _me, mol +from ase.utils import lazymethod, lazyproperty, reader, writer _re_float = r"[-+]?\d+\.*\d*(?:[Ee][-+]\d+)?" 
AU_to_MASS = mol * _me * 1e3 @@ -697,7 +697,7 @@ def read_abacus(fd, latname=None, verbose=False): ) symbols = specie_lines[:, 0] ntype = len(symbols) - mass = specie_lines[:, 1].astype(float) + # mass = specie_lines[:, 1].astype(float) try: atom_potential = dict(zip(symbols, specie_lines[:, 2].tolist())) except IndexError: @@ -789,7 +789,7 @@ def read_abacus(fd, latname=None, verbose=False): # symbols, magnetism sym = [symbol] * number - masses = [mass] * number + # masses = [mass] * number atom_mags = [float(sub_block.group(1))] * number for j in range(number): atom_symbol.append(sym[j]) @@ -1121,7 +1121,7 @@ def _parse_k_points(self): def str_to_kpoints(val_in): lines = ( re.search( - rf"KPOINTS\s*DIRECT_X\s*DIRECT_Y\s*DIRECT_Z\s*WEIGHT([\s\S]+?)DONE", + r"KPOINTS\s*DIRECT_X\s*DIRECT_Y\s*DIRECT_Z\s*WEIGHT([\s\S]+?)DONE", val_in, ) .group(1) @@ -1349,7 +1349,7 @@ def _parse_efermi(self): def _parse_ionic_block(self): """Parse the ionic block from the output file""" step_pattern = re.compile( - rf"(?:[NON]*SELF-|STEP OF|RELAX CELL)([\s\S]+?)charge density convergence is achieved" + r"(?:[NON]*SELF-|STEP OF|RELAX CELL)([\s\S]+?)charge density convergence is achieved" ) return step_pattern.findall(self.contents) @@ -1384,7 +1384,7 @@ def _parse_cell_relaxation_convergency(self): == "Lattice relaxation is converged!" ) res = np.zeros((self.ion_steps), dtype=bool) - if lat_arr[-1] == True: + if lat_arr[-1]: res[-1] = 1 return res.astype(bool) @@ -1805,7 +1805,7 @@ def magmom(self): @lazyproperty def n_iter(self): """The number of SCF iterations needed to converge the SCF cycle for the chunk""" - step_pattern = re.compile(rf"ELEC\s*=\s*[+]?(\d+)") + step_pattern = re.compile(r"ELEC\s*=\s*[+]?(\d+)") try: return int(step_pattern.findall(self._ionic_block)[-1]) diff --git a/TB2J/abacus/test_density_matrix.py b/TB2J/interfaces/abacus/test_density_matrix.py similarity index 100% rename from TB2J/abacus/test_density_matrix.py rename to TB2J/interfaces/abacus/test_density_matrix.py index 589d901..298a2f9 100644 --- a/TB2J/abacus/test_density_matrix.py +++ b/TB2J/interfaces/abacus/test_density_matrix.py @@ -1,5 +1,5 @@ -from scipy.linalg import eigh import numpy as np +from scipy.linalg import eigh def gen_random_hermitean_matrix(n): diff --git a/TB2J/abacus/test_read_HRSR.py b/TB2J/interfaces/abacus/test_read_HRSR.py similarity index 98% rename from TB2J/abacus/test_read_HRSR.py rename to TB2J/interfaces/abacus/test_read_HRSR.py index c63e723..481647d 100644 --- a/TB2J/abacus/test_read_HRSR.py +++ b/TB2J/interfaces/abacus/test_read_HRSR.py @@ -1,5 +1,4 @@ from abacus_api import read_HR_SR -import numpy as np def main(): diff --git a/TB2J/abacus/test_read_stru.py b/TB2J/interfaces/abacus/test_read_stru.py similarity index 79% rename from TB2J/abacus/test_read_stru.py rename to TB2J/interfaces/abacus/test_read_stru.py index c9dded7..22060b4 100644 --- a/TB2J/abacus/test_read_stru.py +++ b/TB2J/interfaces/abacus/test_read_stru.py @@ -3,15 +3,17 @@ """ @File : test_read_stru.py @Time : 2024/02/02 09:52:23 -@Author : Shen Zhen-Xiong +@Author : Shen Zhen-Xiong @Email : shenzx@iai.ustc.edu.cn """ + import os -from stru_api import read_abacus, write_abacus, read_input + +from stru_api import read_abacus, read_input, write_abacus def main(): - stru_fe = read_abacus(os.path.join(os.getcwd(), "input/Fe.STRU")) + # stru_fe = read_abacus(os.path.join(os.getcwd(), "input/Fe.STRU")) stru_sr2mn2o6 = read_abacus( os.path.join(os.getcwd(), "input/Sr2Mn2O6.STRU"), verbose=True ) diff --git 
a/TB2J/interfaces/gpaw_interface.py b/TB2J/interfaces/gpaw_interface.py new file mode 100644 index 0000000..bdc774c --- /dev/null +++ b/TB2J/interfaces/gpaw_interface.py @@ -0,0 +1,54 @@ +import os + +import numpy as np + +from TB2J.exchange import ExchangeNCL +from TB2J.gpaw_wrapper import GPAWWrapper +from TB2J.utils import auto_assign_basis_name + + +def gen_exchange_gpaw( + gpw_fname, + magnetic_elements=[], + kmesh=[3, 3, 3], + emin=-12.0, + emax=0.0, + nz=50, + exclude_orbs=[], + Rcut=None, + use_cache=False, + output_path="TB2J_results", + description="", +): + print("Reading from GPAW data and calculate electronic structure.") + model = GPAWWrapper(gpw_fname=gpw_fname) + efermi = model.calc.get_fermi_level() + print(f"Fermi Energy: {efermi}") + poses = np.vstack([model.positions, model.positions]) + basis, _ = auto_assign_basis_name( + poses, + model.atoms, + write_basis_file=os.path.join(output_path, "assigned_basis.txt"), + ) + + if model.calc.get_spin_polarized(): + print("Starting to calculate exchange.") + exchange = ExchangeNCL( + tbmodels=model, + atoms=model.atoms, + efermi=efermi, + basis=basis, + magnetic_elements=magnetic_elements, + kmesh=kmesh, + emin=emin, + emax=emax, + nz=nz, + exclude_orbs=exclude_orbs, + Rcut=Rcut, + use_cache=use_cache, + output_path=output_path, + description=description, + ) + exchange.run(path=output_path) + print("\n") + print(f"All calculation finished. The results are in {output_path} directory.") diff --git a/TB2J/interfaces/lawaf_interface.py b/TB2J/interfaces/lawaf_interface.py new file mode 100644 index 0000000..8a99998 --- /dev/null +++ b/TB2J/interfaces/lawaf_interface.py @@ -0,0 +1,129 @@ +import argparse +import os +import sys + +from HamiltonIO.lawaf import LawafHamiltonian as Ham + +from TB2J.exchange_params import add_exchange_args_to_parser, parser_argument_to_dict +from TB2J.versioninfo import print_license + +from .manager import Manager + + +class LaWaFManager(Manager): + def __init__( + self, + path, + prefix_up, + prefix_dn, + prefix_SOC, + colinear=True, + wannier_type="LaWaF", + **kwargs, + ): + # models and basis + if colinear: + tbmodels, basis, atoms = self.prepare_model_colinear( + path, prefix_up, prefix_dn + ) + else: + tbmodels, basis, atoms = self.prepare_model_ncl(path, prefix_SOC) + + description = self.description(path, prefix_up, prefix_dn, prefix_SOC, colinear) + kwargs["description"] = description + + super().__init__(atoms, tbmodels, basis, colinear=colinear, **kwargs) + + def prepare_model_colinear(self, path, prefix_up, prefix_dn): + print("Reading LawaF hamiltonian: spin up.") + tbmodel_up = Ham.load_pickle(os.path.join(path, f"{prefix_up}.pickle")) + print("Reading LaWaF hamiltonian: spin down.") + tbmodel_dn = Ham.load_pickle(os.path.join(path, f"{prefix_dn}.pickle")) + tbmodels = (tbmodel_up, tbmodel_dn) + basis = tbmodel_up.wann_names + atoms = tbmodel_up.atoms + return tbmodels, basis, atoms + + def prepare_model_ncl(self, path, prefix_SOC): + print("Reading LaWaF hamiltonian: non-colinear spin.") + tbmodel = Ham.load_pickle(os.path.join(path, f"{prefix_SOC}.pickle")) + basis = tbmodel.wann_names + atoms = tbmodel.atoms + return tbmodel, basis, atoms + + def description(self, path, prefix_up, prefix_dn, prefix_SOC, colinear=True): + if colinear: + description = f""" Input from collinear LaWaF data. + Tight binding data from {path}. + Prefix of wannier function files:{prefix_up} and {prefix_dn}. 
+Warning: Please check if the noise level of Wannier function Hamiltonian to make sure it is much smaller than the exchange values. +\n""" + + else: + description = f""" Input from non-collinear LaWaF data. +Tight binding data from {path}. +Prefix of wannier function files:{prefix_SOC}. +Warning: Please check if the noise level of Wannier function Hamiltonian to make sure it is much smaller than the exchange values. +The DMI component parallel to the spin orientation, the Jani which has the component of that orientation should be disregarded +e.g. if the spins are along z, the xz, yz, zz, zx, zy components and the z component of DMI. +If you need these component, try to do three calculations with spin along x, y, z, or use structure with z rotated to x, y and z. And then use TB2J_merge.py to get the full set of parameters.\n""" + + return description + + +def lawaf2J_cli(): + print_license() + parser = argparse.ArgumentParser( + description="lawaf2J: Using magnetic force theorem to calculate exchange parameter J from wannier functions in LaWaF" + ) + parser.add_argument( + "--path", help="path to the wannier files", default="./", type=str + ) + + parser.add_argument( + "--spinor", + help="if the calculation is spinor, default is False", + default=False, + action="store_true", + ) + + parser.add_argument( + "--prefix_spinor", + help="prefix to the spinor wannier files", + default="lawaf_spinor.pickle", + type=str, + ) + parser.add_argument( + "--prefix_up", + help="prefix to the spin up wannier files", + default="lawaf_up", + type=str, + ) + parser.add_argument( + "--prefix_down", + help="prefix to the spin down wannier files", + default="lawaf_dn", + type=str, + ) + add_exchange_args_to_parser(parser) + + args = parser.parse_args() + + if args.efermi is None: + print("Please input fermi energy using --efermi ") + sys.exit() + if args.elements is None: + print("Please input the magnetic elements, e.g. --elements Fe Ni") + sys.exit() + + kwargs = parser_argument_to_dict(args) + + manager = LaWaFManager( + path=args.path, + prefix_up=args.prefix_up, + prefix_dn=args.prefix_down, + prefix_SOC=args.prefix_spinor, + colinear=not args.spinor, + **kwargs, + ) + return manager diff --git a/TB2J/interfaces/manager.py b/TB2J/interfaces/manager.py new file mode 100644 index 0000000..8036208 --- /dev/null +++ b/TB2J/interfaces/manager.py @@ -0,0 +1,23 @@ +from TB2J.exchange import ExchangeNCL +from TB2J.exchange_qspace import ExchangeCLQspace +from TB2J.exchangeCL2 import ExchangeCL2 + + +class Manager: + def __init__(self, atoms, models, basis, colinear, **kwargs): + # computing exchange + print("Starting to calculate exchange.") + ExchangeClass = self.select_exchange(colinear) + exchange = ExchangeClass(tbmodels=models, atoms=atoms, basis=basis, **kwargs) + output_path = kwargs.get("output_path", "TB2J_results") + exchange.run(path=output_path) + print(f"All calculation finished. 
The results are in {output_path} directory.") + + def select_exchange(self, colinear, qspace=False): + if colinear: + if qspace: + return ExchangeCLQspace + else: + return ExchangeCL2 + else: + return ExchangeNCL diff --git a/TB2J/interfaces/siesta_interface.py b/TB2J/interfaces/siesta_interface.py new file mode 100644 index 0000000..50e9d47 --- /dev/null +++ b/TB2J/interfaces/siesta_interface.py @@ -0,0 +1,193 @@ +import os + +import numpy as np + +from TB2J.exchange import ExchangeNCL +from TB2J.exchangeCL2 import ExchangeCL2 +from TB2J.MAEGreen import MAEGreen + +try: + from HamiltonIO.siesta import SislParser +except ImportError: + print( + "Cannot import SislWrapper from HamiltonIO.siesta. Please install HamiltonIO first." + ) + + +def siesta_anisotropy(**kwargs): + pass + + +def gen_exchange_siesta(fdf_fname, read_H_soc=False, **kwargs): + """ + parameters: + fdf_fname: str + The fdf file for the calculation. + read_H_soc: bool + Whether to read the SOC Hamiltonian. Default is False. + + parameters in **kwargs: + magnetic_elements: list of str + The magnetic elements to calculate the exchange. e.g. ["Fe", "Co", "Ni"] + include_orbs: dict + The included orbitals for each magnetic element. e.g. {"Fe": ["d"]}. Default is None. + kmesh: list of int + The k-point mesh for the calculation. e.g. [5, 5, 5]. Default is [5, 5, 5]. + emin: float + The minimum energy for the calculation. Default is -12.0. + emax: float + The maximum energy for the calculation. Default is 0.0. + nz: int + The number of points for the energy mesh. Default is 100. + exclude_orbs: list of str + The excluded orbitals for the calculation. Default is []. + Rcut: float + The cutoff radius for the exchange calculation in angstrom. Default is None. + ne: int + The number of electrons for the calculation. Default is None. + nproc: int + The number of processors for the calculation. Default is 1. + use_cache: bool + Whether to use the cache for the calculation. Default is False. + output_path: str + The output path for the calculation. Default is "TB2J_results". + orb_decomposition: bool + Whether to calculate the orbital decomposition. Default is False. + orth: bool + Whether to orthogonalize the orbitals. Default is False. + description: str + The description for the calculation. Default is "". + """ + + exargs = dict( + magnetic_elements=[], + include_orbs=None, + kmesh=[5, 5, 5], + emin=-12.0, + emax=0.0, + nz=100, + exclude_orbs=[], + Rcut=None, + ne=None, + nproc=1, + use_cache=False, + output_path="TB2J_results", + orb_decomposition=False, + orth=False, + ibz=False, + description="", + ) + exargs.update(kwargs) + try: + import sisl + except ImportError: + raise ImportError("sisl cannot be imported. Please install sisl first.") + + from packaging import version + + if version.parse(sisl.__version__) <= version.parse("0.10.0"): + raise ImportError( + f"sisl version is {sisl.__version__}, but should be larger than 0.10.0." 
+ ) + + output_path = exargs.pop("output_path") + + parser = SislParser( + fdf_fname=fdf_fname, ispin=None, read_H_soc=read_H_soc, orth=exargs["orth"] + ) + if parser.spin.is_colinear: + print("Reading Siesta hamiltonian: colinear spin.") + # tbmodel_up = SislWrapper(fdf_fname=None, sisl_hamiltonian=H, spin=0, geom=geom) + # tbmodel_dn = SislWrapper(fdf_fname=None, sisl_hamiltonian=H, spin=1, geom=geom) + tbmodel_up, tbmodel_dn = parser.get_model() + basis = dict(zip(tbmodel_up.orbs, list(range(tbmodel_up.norb)))) + print("Starting to calculate exchange.") + description = f""" Input from collinear Siesta data. + working directory: {os.getcwd()} + fdf_fname: {fdf_fname}. +\n""" + exchange = ExchangeCL2( + tbmodels=(tbmodel_up, tbmodel_dn), + atoms=tbmodel_up.atoms, + basis=basis, + efermi=0.0, + # magnetic_elements=exargs['magnetic_elements'], + # include_orbs=ex + **exargs, + ) + exchange.run(path=output_path) + print("\n") + print(f"All calculation finished. The results are in {output_path} directory.") + + elif parser.spin.is_spinorbit or parser.spin.is_noncolinear: + print("Reading Siesta hamiltonian: non-colinear spin.") + model = parser.get_model() + basis = dict(zip(model.orbs, list(range(model.nbasis)))) + print("Starting to calculate exchange.") + description = f""" Input from non-collinear Siesta data. + working directory: {os.getcwd()} + fdf_fname: {fdf_fname}. +Warning: The DMI component parallel to the spin orientation, the Jani which has the component of that orientation should be disregarded + e.g. if the spins are along z, the xz, yz, zz, zx, zy components and the z component of DMI. + If you need these component, try to do three calculations with spin along x, y, z, or use structure with z rotated to x, y and z. And then use TB2J_merge.py to get the full set of parameters. +\n""" + exargs["description"] = description + if not model.split_soc: + exchange = ExchangeNCL( + tbmodels=model, + atoms=model.atoms, + basis=basis, + efermi=0.0, + # magnetic_elements=magnetic_elements, + # include_orbs=include_orbs, + output_path=output_path, + **exargs, + ) + exchange.run(path=output_path) + print("\n") + print( + f"All calculation finished. The results are in {output_path} directory." + ) + else: + print("Starting to calculate MAE.") + model.set_so_strength(0.0) + MAE = MAEGreen( + tbmodels=model, + atoms=model.atoms, + basis=basis, + efermi=None, + angles="miller", + # magnetic_elements=magnetic_elements, + # include_orbs=include_orbs, + **exargs, + ) + # thetas = [0, np.pi / 2, np.pi, 3 * np.pi / 2] + # phis = [0, 0, 0, 0] + # MAE.set_angles(thetas=thetas, phis=phis) + MAE.run(output_path=f"{output_path}_anisotropy", with_eigen=False) + print( + f"MAE calculation finished. The results are in {output_path} directory." + ) + + angle = {"x": (np.pi / 2, 0), "y": (np.pi / 2, np.pi / 2), "z": (0, 0)} + for key, val in angle.items(): + # model = parser.get_model() + theta, phi = val + model.set_so_strength(1.0) + model.set_Hsoc_rotation_angle([theta, phi]) + basis = dict(zip(model.orbs, list(range(model.nbasis)))) + output_path_full = f"{output_path}_{key}" + exchange = ExchangeNCL( + tbmodels=model, + atoms=model.atoms, + basis=basis, + efermi=None, # set to None, compute from efermi. + # magnetic_elements=magnetic_elements, + # include_orbs=include_orbs, + **exargs, + ) + exchange.run(path=output_path_full) + print("\n") + print( + f"All calculation finished. The results are in {output_path_full} directory." 
+ ) diff --git a/TB2J/interfaces/wannier90_interface.py b/TB2J/interfaces/wannier90_interface.py new file mode 100644 index 0000000..a7ba43f --- /dev/null +++ b/TB2J/interfaces/wannier90_interface.py @@ -0,0 +1,115 @@ +import os + +from ase.io import read + +from TB2J.myTB import MyTB +from TB2J.utils import auto_assign_basis_name +from TB2J.wannier import parse_atoms + +from .manager import Manager + + +class WannierManager(Manager): + def __init__( + self, + path, + prefix_up, + prefix_dn, + prefix_SOC, + colinear=True, + groupby="spin", + posfile=None, + basis_fname=None, + qspace=False, + wannier_type="wannier90", + **kwargs, + ): + # atoms + atoms = self.prepare_atoms(path, posfile, prefix_up, prefix_SOC, colinear) + output_path = kwargs.get("output_path", "TB2J_results") + # models and basis + if colinear: + tbmodels, basis = self.prepare_model_colinear( + path, prefix_up, prefix_dn, atoms, output_path=output_path + ) + else: + tbmodels, basis = self.prepare_model_ncl( + path, prefix_SOC, atoms, groupby, output_path=output_path + ) + + description = self.description(path, prefix_up, prefix_dn, prefix_SOC, colinear) + kwargs["description"] = description + + super().__init__(atoms, tbmodels, basis, colinear=colinear, **kwargs) + + def prepare_atoms(self, path, posfile, prefix_up, prefix_SOC, colinear=True): + fname = os.path.join(path, posfile) + try: + print(f"Reading atomic structure from file {fname}.") + atoms = read(os.path.join(path, posfile)) + except Exception: + print( + f"Cannot read atomic structure from file {fname}. Trying to read from Wannier input." + ) + if colinear: + fname = os.path.join(path, f"{prefix_up}.win") + else: + fname = os.path.join(path, f"{prefix_SOC}.win") + + print(f"Reading atomic structure from file {fname}.") + atoms = parse_atoms(fname) + return atoms + + def prepare_model_colinear(self, path, prefix_up, prefix_dn, atoms, output_path): + print("Reading Wannier90 hamiltonian: spin up.") + tbmodel_up = MyTB.read_from_wannier_dir( + path=path, prefix=prefix_up, atoms=atoms, nls=False + ) + print("Reading Wannier90 hamiltonian: spin down.") + tbmodel_dn = MyTB.read_from_wannier_dir( + path=path, prefix=prefix_dn, atoms=atoms, nls=False + ) + basis, _ = auto_assign_basis_name( + tbmodel_up.xred, + atoms, + write_basis_file=os.path.join(output_path, "assigned_basis.txt"), + ) + tbmodels = (tbmodel_up, tbmodel_dn) + return tbmodels, basis + + def prepare_model_ncl(self, path, prefix_SOC, atoms, groupby, output_path): + print("Reading Wannier90 hamiltonian: non-colinear spin.") + groupby = groupby.lower().strip() + if groupby not in ["spin", "orbital"]: + raise ValueError("groupby can only be spin or orbital.") + tbmodel = MyTB.read_from_wannier_dir( + path=path, prefix=prefix_SOC, atoms=atoms, groupby=groupby, nls=True + ) + basis, _ = auto_assign_basis_name( + tbmodel.xred, + atoms, + write_basis_file=os.path.join(output_path, "assigned_basis.txt"), + ) + return tbmodel, basis + + def description(self, path, prefix_up, prefix_dn, prefix_SOC, colinear=True): + if colinear: + description = f""" Input from collinear Wannier90 data. + Tight binding data from {path}. + Prefix of wannier function files:{prefix_up} and {prefix_dn}. +Warning: Please check if the noise level of Wannier function Hamiltonian to make sure it is much smaller than the exchange values. +\n""" + + else: + description = f""" Input from non-collinear Wannier90 data. +Tight binding data from {path}. +Prefix of wannier function files:{prefix_SOC}. 
+Warning: Please check if the noise level of Wannier function Hamiltonian to make sure it is much smaller than the exchange values. +The DMI component parallel to the spin orientation, the Jani which has the component of that orientation should be disregarded +e.g. if the spins are along z, the xz, yz, zz, zx, zy components and the z component of DMI. +If you need these component, try to do three calculations with spin along x, y, z, or use structure with z rotated to x, y and z. And then use TB2J_merge.py to get the full set of parameters.\n""" + + return description + + +gen_exchange = WannierManager diff --git a/TB2J/io_exchange/io_exchange.py b/TB2J/io_exchange/io_exchange.py index 27b5003..43eb820 100644 --- a/TB2J/io_exchange/io_exchange.py +++ b/TB2J/io_exchange/io_exchange.py @@ -9,16 +9,23 @@ """ import os +import pickle from collections.abc import Iterable +from datetime import datetime + +import matplotlib + +matplotlib.use("Agg") +import gc + +import matplotlib.pyplot as plt import numpy as np -from TB2J.kpoints import monkhorst_pack -import pickle + from TB2J import __version__ +from TB2J.io_exchange.io_txt import write_Jq_info from TB2J.Jtensor import combine_J_tensor -from datetime import datetime -import matplotlib.pyplot as plt +from TB2J.kpoints import monkhorst_pack from TB2J.spinham.spin_api import SpinModel -from TB2J.io_exchange.io_txt import write_Jq_info from TB2J.utils import symbol_number @@ -238,6 +245,18 @@ def get_spin_iatom(self, iatom): def get_charge_iatom(self, iatom): return self.charges[iatom] + def ijR_index_spin_to_atom(self, i, j, R): + return (self.iatom(i), self.iatom(j), R) + + def ijR_index_atom_to_spin(self, iatom, jatom, R): + return (self.index_spin[iatom], self.index_spin[jatom], R) + + def ijR_list(self): + return [(i, j, R) for R, i, j in self.exchange_Jdict] + + def ijR_list_index_atom(self): + return [self.ijR_index_spin_to_atom(i, j, R) for R, i, j in self.exchange_Jdict] + def get_J(self, i, j, R, default=None): i = self.i_spin(i) j = self.i_spin(j) @@ -306,7 +325,11 @@ def get_J_tensor(self, i, j, R, iso_only=False): i = self.i_spin(i) j = self.i_spin(j) if iso_only: - Jtensor = np.eye(3) * self.get_J(i, j, R) + J = self.get_Jiso(i, j, R) + if J is not None: + Jtensor = np.eye(3) * self.get_J(i, j, R) + else: + Jtensor = np.eye(3) * 0 else: Jtensor = combine_J_tensor( Jiso=self.get_J(i, j, R), @@ -315,7 +338,7 @@ def get_J_tensor(self, i, j, R, iso_only=False): ) return Jtensor - def get_full_Jtensor_for_one_R(self, R): + def get_full_Jtensor_for_one_R(self, R, iso_only=False): """ Return the full exchange tensor of all i and j for cell R. 
param R (tuple of integers): cell index R @@ -326,15 +349,17 @@ def get_full_Jtensor_for_one_R(self, R): Jmat = np.zeros((n3, n3), dtype=float) for i in range(self.nspin): for j in range(self.nspin): - Jmat[i * 3 : i * 3 + 3, j * 3 : j * 3 + 3] = self.get_J_tensor(i, j, R) + Jmat[i * 3 : i * 3 + 3, j * 3 : j * 3 + 3] = self.get_J_tensor( + i, j, R, iso_only=iso_only + ) return Jmat - def get_full_Jtensor_for_Rlist(self, asr=False, iso_only=False): + def get_full_Jtensor_for_Rlist(self, asr=False, iso_only=True): n3 = self.nspin * 3 nR = len(self.Rlist) Jmat = np.zeros((nR, n3, n3), dtype=float) for iR, R in enumerate(self.Rlist): - Jmat[iR] = self.get_full_Jtensor_for_one_R(R) + Jmat[iR] = self.get_full_Jtensor_for_one_R(R, iso_only=iso_only) if asr: iR0 = np.argmin(np.linalg.norm(self.Rlist, axis=1)) assert np.linalg.norm(self.Rlist[iR0]) == 0 @@ -380,6 +405,7 @@ def write_all(self, path="TB2J_results"): self.write_multibinit(path=os.path.join(path, "Multibinit")) self.write_tom_format(path=os.path.join(path, "TomASD")) self.write_vampire(path=os.path.join(path, "Vampire")) + self.plot_all(savefile=os.path.join(path, "JvsR.pdf")) # self.write_Jq(kmesh=[9, 9, 9], path=path) @@ -529,6 +555,7 @@ def plot_all(self, title=None, savefile=None, show=False): plt.show() plt.clf() plt.close() + gc.collect() # This is to fix the tk error if multiprocess is used. return fig, axes def write_tom_format(self, path): @@ -562,8 +589,8 @@ def gen_distance_dict(ind_mag_atoms, atoms, Rlist): def test_spin_io(): - from ase import Atoms import numpy as np + from ase import Atoms atoms = Atoms( "SrMnO3", @@ -579,7 +606,6 @@ def test_spin_io(): spinat = [[0, 0, x] for x in [0, 3, 0, 0, 0]] charges = [2, 4, 5, 5, 5] index_spin = [-1, 0, -1, -1, -1] - colinear = True Rlist = [[0, 0, 0], [0, 0, 1]] ind_mag_atoms = [1] distance_dict = gen_distance_dict(ind_mag_atoms, atoms, Rlist) diff --git a/TB2J/kpoints.py b/TB2J/kpoints.py index d92b787..8b45179 100644 --- a/TB2J/kpoints.py +++ b/TB2J/kpoints.py @@ -1,4 +1,7 @@ +from collections import Counter + import numpy as np +import spglib def monkhorst_pack(size, gamma_center=False): @@ -12,5 +15,87 @@ def monkhorst_pack(size, gamma_center=False): mkpts = (kpts + 0.5) / size - 0.5 if gamma_center: mkpts += shift - return mkpts + + +def get_ir_kpts(atoms, mesh): + """ + Gamma-centered IR kpoints. mesh : [nk1,nk2,nk3]. + """ + lattice = atoms.get_cell() + positions = atoms.get_scaled_positions() + numbers = atoms.get_atomic_numbers() + + cell = (lattice, positions, numbers) + mapping, grid = spglib.get_ir_reciprocal_mesh(mesh, cell, is_shift=[0, 0, 0]) + return grid[np.unique(mapping)] / np.array(mesh, dtype=float) + + +def ir_kpts( + atoms, mp_grid, is_shift=[0, 0, 0], verbose=True, ir=True, is_time_reversal=False +): + """ + generate kpoints for structure + Parameters: + ------------------ + atoms: ase.Atoms + structure + mp_grid: [nk1,nk2,nk3] + is_shift: shift of k points. default is Gamma centered. + ir: bool + Irreducible or not. 
+ """ + cell = (atoms.get_cell(), atoms.get_scaled_positions(), atoms.get_atomic_numbers()) + # print(spglib.get_spacegroup(cell, symprec=1e-5)) + mesh = mp_grid + # Gamma centre mesh + mapping, grid = spglib.get_ir_reciprocal_mesh( + mesh, cell, is_shift=is_shift, is_time_reversal=is_time_reversal, symprec=1e-4 + ) + if not ir: + return (np.array(grid).astype(float) + np.asarray(is_shift) / 2.0) / mesh, [ + 1.0 / len(mapping) + ] * len(mapping) + # All k-points and mapping to ir-grid points + # for i, (ir_gp_id, gp) in enumerate(zip(mapping, grid)): + # print("%3d ->%3d %s" % (i, ir_gp_id, gp.astype(float) / mesh)) + cnt = Counter(mapping) + ids = list(cnt.keys()) + weight = list(cnt.values()) + weight = np.array(weight) * 1.0 / sum(weight) + ird_kpts = [ + (grid[id].astype(float) + np.asarray(is_shift) / 2.0) / mesh for id in ids + ] + + # Irreducible k-points + # print("Number of ir-kpoints: %d" % len(np.unique(mapping))) + # print(grid[np.unique(mapping)] / np.array(mesh, dtype=float)) + + new_ir_kpts = [] + new_weight = [] + for ik, k in enumerate(ird_kpts): + # add k and -k to ird_kpts if -k is not in ird_kpts + if not any([np.allclose(-1.0 * k, kpt) for kpt in new_ir_kpts]): + new_ir_kpts.append(k) + new_ir_kpts.append(-1.0 * k) + new_weight.append(weight[ik] / 2) + new_weight.append(weight[ik] / 2) + else: + new_ir_kpts.append(k) + new_weight.append(weight[ik]) + # return ird_kpts, weight + return np.array(new_ir_kpts), np.array(new_weight) + + +def test_ir_kpts(): + from ase.build import bulk + + atoms = bulk("Si") + mesh = [14, 14, 14] + kpts = get_ir_kpts(atoms, mesh) + print(kpts) + print(len(kpts)) + + +if __name__ == "__main__": + test_ir_kpts() diff --git a/TB2J/manager.py b/TB2J/manager.py deleted file mode 100644 index eab6bc6..0000000 --- a/TB2J/manager.py +++ /dev/null @@ -1,433 +0,0 @@ -import os -from TB2J.myTB import MyTB, merge_tbmodels_spin -import numpy as np -from TB2J.exchange import ExchangeNCL -from TB2J.exchangeCL2 import ExchangeCL2 -from TB2J.exchange_qspace import ExchangeCLQspace -from TB2J.utils import read_basis, auto_assign_basis_name -from ase.io import read -from TB2J.sisl_wrapper import SislWrapper -from TB2J.gpaw_wrapper import GPAWWrapper -from TB2J.wannier import parse_atoms - - -def gen_exchange( - path, - colinear=True, - groupby="spin", - posfile=None, - prefix_up="wannier90.up", - prefix_dn="wannier90.dn", - prefix_SOC="wannier90", - min_hopping_norm=1e-4, - max_distance=None, - efermi=0, - magnetic_elements=[], - kmesh=[4, 4, 4], - emin=-12.0, - emax=0.0, - nz=100, - exclude_orbs=[], - Rcut=None, - ne=None, - use_cache=False, - np=1, - output_path="TB2J_results", - wannier_type="wannier90", - qspace=False, - orb_decomposition=False, - write_density_matrix=False, - description="", -): - try: - fname = os.path.join(path, posfile) - print(f"Reading atomic structure from file {fname}.") - atoms = read(os.path.join(path, posfile)) - except Exception: - print( - f"Cannot read atomic structure from file {fname}. Trying to read from Wannier input." 
- ) - if colinear: - fname = os.path.join(path, f"{prefix_up}.win") - else: - fname = os.path.join(path, f"{prefix_SOC}.win") - - print(f"Reading atomic structure from file {fname}.") - atoms = parse_atoms(fname) - - basis_fname = os.path.join(path, "basis.txt") - if colinear: - if wannier_type.lower() == "wannier90": - print("Reading Wannier90 hamiltonian: spin up.") - tbmodel_up = MyTB.read_from_wannier_dir( - path=path, prefix=prefix_up, atoms=atoms, nls=False - ) - print("Reading Wannier90 hamiltonian: spin down.") - tbmodel_dn = MyTB.read_from_wannier_dir( - path=path, prefix=prefix_dn, atoms=atoms, nls=False - ) - if os.path.exists(basis_fname): - basis = read_basis(basis_fname) - else: - basis, _ = auto_assign_basis_name( - tbmodel_up.xred, - atoms, - write_basis_file=os.path.join(output_path, "assigned_basis.txt"), - ) - elif wannier_type.lower() == "banddownfolder": - print("Reading Banddownfolder hamiltonian: spin up.") - tbmodel_up = MyTB.load_banddownfolder( - path=path, prefix=prefix_up, atoms=atoms, nls=False - ) - print("Reading Banddownfolder hamiltonian: spin down.") - tbmodel_dn = MyTB.load_banddownfolder( - path=path, prefix=prefix_dn, atoms=atoms, nls=False - ) - - basis, _ = auto_assign_basis_name( - tbmodel_up.xred, - atoms, - write_basis_file=os.path.join(output_path, "assigned_basis.txt"), - ) - else: - raise ValueError("wannier_type should be Wannier90 or banddownfolder.") - - print("Starting to calculate exchange.") - description = f""" Input from collinear Wannier90 data. - Tight binding data from {path}. - Prefix of wannier function files:{prefix_up} and {prefix_dn}. -Warning: Please check if the noise level of Wannier function Hamiltonian to make sure it is much smaller than the exchange values. -\n""" - if not qspace: - exchange = ExchangeCL2( - tbmodels=(tbmodel_up, tbmodel_dn), - atoms=atoms, - basis=basis, - efermi=efermi, - magnetic_elements=magnetic_elements, - kmesh=kmesh, - emin=emin, - emax=emax, - nz=nz, - exclude_orbs=exclude_orbs, - Rcut=Rcut, - ne=ne, - np=np, - use_cache=use_cache, - output_path=output_path, - write_density_matrix=write_density_matrix, - description=description, - ) - else: - exchange = ExchangeCLQspace( - tbmodels=(tbmodel_up, tbmodel_dn), - atoms=atoms, - basis=basis, - efermi=efermi, - magnetic_elements=magnetic_elements, - kmesh=kmesh, - emin=emin, - emax=emax, - nz=nz, - exclude_orbs=exclude_orbs, - Rcut=Rcut, - ne=ne, - np=np, - use_cache=use_cache, - output_path=output_path, - write_density_matrix=write_density_matrix, - description=description, - ) - - exchange.run(path=output_path) - print("All calculation finsihed. The results are in TB2J_results directory.") - - elif colinear and wannier_type.lower() == "banddownfolder": - print("Reading Wannier90 hamiltonian: spin up.") - tbmodel_up = MyTB.read_from_wannier_dir( - path=path, prefix=prefix_up, atoms=atoms, groupby=None, nls=False - ) - print("Reading Wannier90 hamiltonian: spin down.") - tbmodel_dn = MyTB.read_from_wannier_dir( - path=path, prefix=prefix_dn, atoms=atoms, groupby=None, nls=False - ) - tbmodel = merge_tbmodels_spin(tbmodel_up, tbmodel_dn) - basis, _ = auto_assign_basis_name( - tbmodel.xred, - atoms, - write_basis_file=os.path.join(output_path, "assigned_basis.txt"), - ) - description = f""" Input from collinear BandDownfolder data. - Tight binding data from {path}. - Prefix of wannier function files:{prefix_up} and {prefix_dn}. 
-Warning: Please check if the noise level of Wannier function Hamiltonian to make sure it is much smaller than the exchange values. -\n""" - print("Starting to calculate exchange.") - exchange = ExchangeCL2( - tbmodels=tbmodel, - atoms=atoms, - basis=basis, - efermi=efermi, - magnetic_elements=magnetic_elements, - kmesh=kmesh, - emin=emin, - emax=emax, - nz=nz, - exclude_orbs=exclude_orbs, - Rcut=Rcut, - ne=ne, - np=np, - use_cache=use_cache, - output_path=output_path, - write_density_matrix=write_density_matrix, - description=description, - ) - exchange.run(path=output_path) - print("All calculation finsihed. The results are in TB2J_results directory.") - else: - print("Reading Wannier90 hamiltonian: non-colinear spin.") - groupby = groupby.lower().strip() - if groupby not in ["spin", "orbital"]: - raise ValueError("groupby can only be spin or orbital.") - tbmodel = MyTB.read_from_wannier_dir( - path=path, prefix=prefix_SOC, atoms=atoms, groupby=groupby, nls=True - ) - if os.path.exists(basis_fname): - print("The use of basis file is deprecated. It will be ignored.") - # basis = read_basis(basis_fname) - else: - basis, _ = auto_assign_basis_name( - tbmodel.xred, - atoms, - write_basis_file=os.path.join(output_path, "assigned_basis.txt"), - ) - description = f""" Input from non-collinear Wannier90 data. - Tight binding data from {path}. - Prefix of wannier function files:{prefix_SOC}. -Warning: Please check if the noise level of Wannier function Hamiltonian to make sure it is much smaller than the exchange values. - The DMI component parallel to the spin orientation, the Jani which has the component of that orientation should be disregarded - e.g. if the spins are along z, the xz, yz, zz, zx, zy components and the z component of DMI. - If you need these component, try to do three calculations with spin along x, y, z, or use structure with z rotated to x, y and z. And then use TB2J_merge.py to get the full set of parameters. - -\n""" - print("Starting to calculate exchange.") - exchange = ExchangeNCL( - tbmodels=tbmodel, - atoms=atoms, - basis=basis, - efermi=efermi, - magnetic_elements=magnetic_elements, - kmesh=kmesh, - emin=emin, - emax=emax, - nz=nz, - exclude_orbs=exclude_orbs, - Rcut=Rcut, - ne=ne, - np=np, - use_cache=use_cache, - description=description, - output_path=output_path, - write_density_matrix=write_density_matrix, - orb_decomposition=orb_decomposition, - ) - print("\n") - exchange.run(path=output_path) - print(f"All calculation finsihed. The results are in {output_path} directory.") - - -def gen_exchange_siesta( - fdf_fname, - magnetic_elements=[], - include_orbs=None, - kmesh=[5, 5, 5], - emin=-12.0, - emax=0.0, - nz=100, - exclude_orbs=[], - Rcut=None, - ne=None, - np=1, - use_cache=False, - output_path="TB2J_results", - orb_decomposition=False, - description="", -): - try: - import sisl - except: - raise ImportError("sisl cannot be imported. Please install sisl first.") - - from packaging import version - - if version.parse(sisl.__version__) <= version.parse("0.10.0"): - raise ImportError( - f"sisl version is {sisl.__version__}, but should be larger than 0.10.0." 
- ) - - include_orbs = {} - if isinstance(magnetic_elements, str): - magnetic_elements = [magnetic_elements] - for element in magnetic_elements: - if "_" in element: - elem = element.split("_")[0] - orb = element.split("_")[1:] - include_orbs[elem] = orb - else: - include_orbs[element] = None - magnetic_elements = list(include_orbs.keys()) - - fdf = sisl.get_sile(fdf_fname) - # geom = fdf.read_geometry() - H = fdf.read_hamiltonian() - geom = H.geometry - if H.spin.is_colinear: - print("Reading Siesta hamiltonian: colinear spin.") - tbmodel_up = SislWrapper(H, spin=0, geom=geom) - tbmodel_dn = SislWrapper(H, spin=1, geom=geom) - basis = dict(zip(tbmodel_up.orbs, list(range(tbmodel_up.norb)))) - print("Starting to calculate exchange.") - description = f""" Input from collinear Siesta data. - working directory: {os.getcwd()} - fdf_fname: {fdf_fname}. -\n""" - exchange = ExchangeCL2( - tbmodels=(tbmodel_up, tbmodel_dn), - atoms=tbmodel_up.atoms, - basis=basis, - efermi=0.0, - magnetic_elements=magnetic_elements, - include_orbs=include_orbs, - kmesh=kmesh, - emin=emin, - emax=emax, - nz=nz, - exclude_orbs=exclude_orbs, - Rcut=Rcut, - ne=ne, - np=np, - use_cache=use_cache, - output_path=output_path, - description=description, - ) - exchange.run(path=output_path) - print("\n") - print(f"All calculation finsihed. The results are in {output_path} directory.") - - elif H.spin.is_colinear and False: - print( - "Reading Siesta hamiltonian: colinear spin. Treat as non-colinear. For testing only." - ) - tbmodel = SislWrapper(H, spin="merge", geom=geom) - basis = dict(zip(tbmodel.orbs, list(range(tbmodel.nbasis)))) - print("Starting to calculate exchange.") - description = f""" Input from collinear Siesta data. - working directory: {os.getcwd()} - fdf_fname: {fdf_fname}. -\n""" - exchange = ExchangeNCL( - tbmodels=tbmodel, - atoms=tbmodel.atoms, - basis=basis, - efermi=0.0, - magnetic_elements=magnetic_elements, - include_orbs=include_orbs, - kmesh=kmesh, - emin=emin, - emax=emax, - nz=nz, - exclude_orbs=exclude_orbs, - Rcut=Rcut, - ne=ne, - np=np, - use_cache=use_cache, - description=description, - output_path=output_path, - orb_decomposition=orb_decomposition, - ) - exchange.run(path=output_path) - print("\n") - print(f"All calculation finsihed. The results are in {output_path} directory.") - - elif H.spin.is_spinorbit or H.spin.is_noncolinear: - print("Reading Siesta hamiltonian: non-colinear spin.") - tbmodel = SislWrapper(H, spin=None, geom=geom) - basis = dict(zip(tbmodel.orbs, list(range(tbmodel.nbasis)))) - print("Starting to calculate exchange.") - description = f""" Input from non-collinear Siesta data. - working directory: {os.getcwd()} - fdf_fname: {fdf_fname}. -Warning: The DMI component parallel to the spin orientation, the Jani which has the component of that orientation should be disregarded - e.g. if the spins are along z, the xz, yz, zz, zx, zy components and the z component of DMI. - If you need these component, try to do three calculations with spin along x, y, z, or use structure with z rotated to x, y and z. And then use TB2J_merge.py to get the full set of parameters. 
-\n""" - exchange = ExchangeNCL( - tbmodels=tbmodel, - atoms=tbmodel.atoms, - basis=basis, - efermi=0.0, - magnetic_elements=magnetic_elements, - include_orbs=include_orbs, - kmesh=kmesh, - emin=emin, - emax=emax, - nz=nz, - exclude_orbs=exclude_orbs, - Rcut=Rcut, - ne=ne, - np=np, - use_cache=use_cache, - description=description, - output_path=output_path, - orb_decomposition=orb_decomposition, - ) - exchange.run(path=output_path) - print("\n") - print(f"All calculation finsihed. The results are in {output_path} directory.") - - -def gen_exchange_gpaw( - gpw_fname, - magnetic_elements=[], - kmesh=[3, 3, 3], - emin=-12.0, - emax=0.0, - nz=50, - exclude_orbs=[], - Rcut=None, - use_cache=False, - output_path="TB2J_results", - description="", -): - print("Reading from GPAW data and calculate electronic structure.") - model = GPAWWrapper(gpw_fname=gpw_fname) - efermi = model.calc.get_fermi_level() - print(f"Fermi Energy: {efermi}") - poses = np.vstack([model.positions, model.positions]) - basis, _ = auto_assign_basis_name( - poses, - model.atoms, - write_basis_file=os.path.join(output_path, "assigned_basis.txt"), - ) - - if model.calc.get_spin_polarized(): - print("Starting to calculate exchange.") - exchange = ExchangeNCL( - tbmodels=model, - atoms=model.atoms, - efermi=efermi, - basis=basis, - magnetic_elements=magnetic_elements, - kmesh=kmesh, - emin=emin, - emax=emax, - nz=nz, - exclude_orbs=exclude_orbs, - Rcut=Rcut, - use_cache=use_cache, - output_path=output_path, - description=description, - ) - exchange.run(path=output_path) - print("\n") - print(f"All calculation finsihed. The results are in {output_path} directory.") diff --git a/TB2J/mathutils.py b/TB2J/mathutils.py deleted file mode 100644 index bee9ccc..0000000 --- a/TB2J/mathutils.py +++ /dev/null @@ -1,12 +0,0 @@ -import numpy as np -from scipy.linalg import inv, eigh - - -def Lowdin(S): - """ - Calculate S^(-1/2). - Which is used in lowind's symmetric orthonormalization. - psi_prime = S^(-1/2) psi - """ - eigval, eigvec = eigh(S) - return eigvec @ np.diag(np.sqrt(1.0 / eigval)) @ (eigvec.T.conj()) diff --git a/TB2J/mathutils/fermi.py b/TB2J/mathutils/fermi.py index 66d60ec..938004e 100644 --- a/TB2J/mathutils/fermi.py +++ b/TB2J/mathutils/fermi.py @@ -1,8 +1,9 @@ -import numpy as np -import warnings import sys -MAX_EXP_ARGUMENT = np.log(sys.float_info.max) +import numpy as np + +MAX_EXP_ARGUMENT = np.log(sys.float_info.max / 100000) +print(MAX_EXP_ARGUMENT) def fermi(e, mu, width=0.01): @@ -15,8 +16,12 @@ def fermi(e, mu, width=0.01): """ x = (e - mu) / width # disable overflow warning - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - ret = np.where(x < MAX_EXP_ARGUMENT, 1 / (1.0 + np.exp(x)), 0.0) + # with warnings.catch_warnings(): + # warnings.simplefilter("ignore") + # ret = np.where(x < MAX_EXP_ARGUMENT, 1 / (1.0 + np.exp(x)), 0.0) + ret = np.zeros_like(x, dtype=float) + for i, xi in enumerate(x): + if xi < 700: + ret[i] = 1 / (1.0 + np.exp(xi)) return ret diff --git a/TB2J/mathutils/fibonacci_sphere.py b/TB2J/mathutils/fibonacci_sphere.py new file mode 100644 index 0000000..2de4d57 --- /dev/null +++ b/TB2J/mathutils/fibonacci_sphere.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +""" +scan the spheres in the space of (theta, phi), so that the points are uniformly distributed on a sphere. +The algorithm is based on fibonacci spiral sampling method. 
+Reference: +https://extremelearning.com.au/how-to-evenly-distribute-points-on-a-sphere-more-effectively-than-the-canonical-fibonacci-lattice/ + +But note that the convention of theta and phi are different from the one in the reference. +Here, cos(theta) = z, and phi is the azimuthal angle in the x-y plane. +In the reference, theta is the azimuthal angle in the x-y plane, and phi is the polar angle. +""" + +import numpy as np +from numpy import pi + + +def fibonacci_sphere(samples=100): + """ + Fibonacci Sphere Sampling Method + Parameters: + samples (int): number of points to sample on the sphere + Returns: + theta and phi: numpy arrays with shape (samples,) containing the angles of the sampled points. + """ + # Initialize the golden ratio and angles + goldenRatio = (1 + np.sqrt(5)) / 2 + phi = np.arange(samples) * (2 * pi / goldenRatio) + theta = np.arccos(1 - 2 * np.arange(samples) / samples) + return theta, phi + + +def fibonacci_semisphere(samples=100): + """ + Fibonacci Sphere Sampling Method for the upper hemisphere of a sphere. + + Parameters: + samples (int): number of points to sample on the sphere + + Returns: + theta and phi: numpy arrays with shape (samples,) containing the angles of the sampled points. + """ + # Initialize the golden ratio and angles + goldenRatio = (1 + np.sqrt(5)) / 2 + phi = np.arange(samples) * (2 * pi / goldenRatio) + theta = np.arccos(np.linspace(0, 1, samples)) + return theta, phi + + +def test_fibonacci_sphere(): + import matplotlib.pyplot as plt + + # Generate points on the sphere + samples = 20000 + theta, phi = fibonacci_sphere(samples) + # theta, phi = fibonacci_semisphere(samples) + + # Convert spherical coordinates to Cartesian coordinates + x = np.cos(phi) * np.sin(theta) + y = np.sin(phi) * np.sin(theta) + z = np.cos(theta) + + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + ax.scatter(x, y, z, s=13.5) + # ax.plot(x, y, z) + + # Set aspect to 'equal' for equal scaling in all directions + ax.set_aspect("equal") + + plt.show() + + +if __name__ == "__main__": + test_fibonacci_sphere() diff --git a/TB2J/mathutils/lowdin.py b/TB2J/mathutils/lowdin.py index bee9ccc..f32bdf3 100644 --- a/TB2J/mathutils/lowdin.py +++ b/TB2J/mathutils/lowdin.py @@ -1,5 +1,5 @@ import numpy as np -from scipy.linalg import inv, eigh +from scipy.linalg import eigh def Lowdin(S): @@ -9,4 +9,14 @@ def Lowdin(S): psi_prime = S^(-1/2) psi """ eigval, eigvec = eigh(S) - return eigvec @ np.diag(np.sqrt(1.0 / eigval)) @ (eigvec.T.conj()) + S_half = eigvec @ np.diag(np.sqrt(1.0 / eigval)) @ (eigvec.T.conj()) + return S_half + + +def Lowdin_symmetric_orthonormalization(H, S): + """ + Lowdin's symmetric orthonormalization. 
+ """ + S_half = Lowdin(S) + H_prime = S_half @ H @ S_half + return H_prime diff --git a/TB2J/mathutils/rotate_spin.py b/TB2J/mathutils/rotate_spin.py index af0c5b2..0450a9e 100644 --- a/TB2J/mathutils/rotate_spin.py +++ b/TB2J/mathutils/rotate_spin.py @@ -1,5 +1,7 @@ import numpy as np -from TB2J.pauli import pauli_block_all, s0, s1, s2, s3, gather_pauli_blocks +from scipy.sparse import eye_array, kron + +from TB2J.pauli import gather_pauli_blocks, pauli_block_all def rotate_Matrix_from_z_to_axis(M, axis, normalize=True): @@ -14,6 +16,240 @@ def rotate_Matrix_from_z_to_axis(M, axis, normalize=True): return M_new +def spherical_to_cartesian(theta, phi, normalize=True): + """ + Convert spherical coordinates to cartesian + """ + x = np.sin(theta) * np.cos(phi) + y = np.sin(theta) * np.sin(phi) + z = np.cos(theta) + vec = np.array([x, y, z]) + if normalize: + vec = vec / np.linalg.norm(vec) + return vec + + +def rotation_matrix(theta, phi): + """ + The unitray operator U, that U^dagger * s3 * U is the rotated s3 by theta and phi + """ + U = np.array( + [ + [np.cos(theta / 2), np.exp(-1j * phi) * np.sin(theta / 2)], + [-np.exp(1j * phi) * np.sin(theta / 2), np.cos(theta / 2)], + ] + ) + return U + + +def rotate_spinor_single_block(M, theta, phi): + """ + Rotate the spinor matrix M by theta and phi + """ + U = rotation_matrix(theta, phi) + Uinv = np.linalg.inv(U) + return Uinv @ M @ U + + +def rotate_spinor_matrix(M, theta, phi, method="einsum"): + """ + Rotate the spinor matrix M by theta and phi, + """ + if method == "plain": + return rotate_spinor_matrix_plain(M, theta, phi) + elif method == "einsum": + return rotate_spinor_matrix_einsum(M, theta, phi) + elif method == "reshape": + return rotate_spinor_matrix_reshape(M, theta, phi) + elif method == "kron": + return rotate_spinor_matrix_kron(M, theta, phi) + elif method == "spkron": + return rotate_spinor_matrix_spkron(M, theta, phi) + else: + raise ValueError(f"Unknown method: {method}") + + +def rotate_spinor_matrix_plain(M, theta, phi): + """ + M is a matrix with shape (2N, 2N), where N is the number of sites, and each site has a 2x2 matrix + rotate each 2x2 block by theta and phi + """ + Mnew = np.zeros_like(M) + U = rotation_matrix(theta, phi) + UT = U.conj().T + tmp = np.zeros((2, 2), dtype=np.complex128) + for i in range(M.shape[0] // 2): + for j in range(M.shape[0] // 2): + for k in range(2): + for l in range(2): + tmp[k, l] = M[2 * i + k, 2 * j + l] + # tmp[:, :]=M[2*i:2*i+2, 2*j:2*j+2] + Mnew[2 * i : 2 * i + 2, 2 * j : 2 * j + 2] = UT @ tmp @ U + return Mnew + + +def rotate_spinor_matrix_einsum(M, theta, phi): + """ + Rotate the spinor matrix M by theta and phi, + """ + shape = M.shape + n1 = np.product(shape[:-1]) // 2 + n2 = M.shape[-1] // 2 + Mnew = np.reshape(M, (n1, 2, n2, 2)) # .swapaxes(1, 2) + # print("Mnew:", Mnew) + U = rotation_matrix(theta, phi) + UT = U.conj().T + Mnew = np.einsum( + "ij, rjsk, kl -> risl", UT, Mnew, U, optimize=True, dtype=np.complex128 + ) + Mnew = Mnew.reshape(shape) + return Mnew + + +def rotate_spinor_matrix_einsum_R(M, theta, phi): + """ + Rotate the spinor matrix M by theta and phi, + """ + nR = M.shape[0] + N = M.shape[1] // 2 + Mnew = np.reshape(M, (nR, N, 2, N, 2)) # .swapaxes(1, 2) + U = rotation_matrix(theta, phi) + UT = U.conj().T + Mnew = np.einsum( + "ij, nrjsk, kl -> nrisl", UT, Mnew, U, optimize=True, dtype=np.complex128 + ) + Mnew = Mnew.reshape(nR, 2 * N, 2 * N) + return Mnew + + +def rotate_spinor_Matrix_R(M, theta, phi): + return rotate_spinor_matrix_einsum_R(M, theta, phi) + + +def 
rotate_spinor_matrix_reshape(M, theta, phi): + """ + Rotate the spinor matrix M by theta and phi, + """ + N = M.shape[0] // 2 + Mnew = np.reshape(M, (N, 2, N, 2)).swapaxes(1, 2) + # print("Mnew:", Mnew) + U = rotation_matrix(theta, phi) + UT = U.conj().T + Mnew = UT @ Mnew @ U + Mnew = Mnew.swapaxes(1, 2).reshape(2 * N, 2 * N) + return Mnew + + +def rotate_spinor_matrix_kron(M, theta, phi): + """ """ + U = rotation_matrix(theta, phi) + # U = np.kron( U, np.eye(M.shape[0]//2)) + # U = kron(eye_array(M.shape[0]//2), U) + U = np.kron(np.eye(M.shape[0] // 2), U) + M = U.conj().T @ M @ U + return M + + +def rotate_spinor_matrix_spkron(M, theta, phi): + """ """ + U = rotation_matrix(theta, phi) + # U = np.kron( U, np.eye(M.shape[0]//2)) + U = kron(eye_array(M.shape[0] // 2), U) + # U = np.kron(np.eye(M.shape[0]//2), U) + M = U.conj().T @ M @ U + return M + + +def test_rotate_spinor_M(): + N = 2 + M_re = np.random.rand(N * 2, N * 2) + M_im = np.random.rand(N * 2, N * 2) + M = M_re + 1j * M_im + M = M + M.T.conj() + # M=np.array([[1, 0], [0, 1]], dtype=np.complex128) + print(f"Original M: {M}") + + import timeit + + print("Time for rotate_spinor_matrix") + print( + timeit.timeit(lambda: rotate_spinor_matrix(M, np.pi / 2, np.pi / 2), number=10) + ) + print("Time for rotate_spinor_matrix_einsum") + print( + timeit.timeit( + lambda: rotate_spinor_matrix_einsum(M, np.pi / 2, np.pi / 2), number=10 + ) + ) + print("Time for rotate_spinor_matrix_reshape") + print( + timeit.timeit( + lambda: rotate_spinor_matrix_reshape(M, np.pi / 2, np.pi / 2), number=10 + ) + ) + print("Time for rotate_spinor_matrix_kron") + print( + timeit.timeit( + lambda: rotate_spinor_matrix_kron(M, np.pi / 2, np.pi / 2), number=10 + ) + ) + print("Time for rotate_spinor_matrix_spkron") + print( + timeit.timeit( + lambda: rotate_spinor_matrix_spkron(M, np.pi / 2, np.pi / 2), number=10 + ) + ) + + Mrot1 = rotate_spinor_matrix(M, np.pi / 2, np.pi / 2) + Mrot2 = rotate_spinor_matrix_einsum(M, np.pi / 2, np.pi / 2) + Mrot3 = rotate_spinor_matrix_reshape(M, np.pi / 2, np.pi / 2) + Mrot4 = rotate_spinor_matrix_kron(M, np.pi / 2, np.pi / 2) + Mrot5 = rotate_spinor_matrix_spkron(M, np.pi / 2, np.pi / 2) + print(f"Rotated M with jit:\n {Mrot1}") + print(f"Rotated M with einsum:\n {Mrot2-Mrot1}") + print(f"Rotated M with reshape:\n {Mrot3-Mrot1}") + print(f"Rotated M with kron:\n {Mrot4-Mrot1}") + print(f"Rotated M with spkron:\n {Mrot5-Mrot1}") + + M_rot00 = rotate_spinor_matrix(M, 0, 0) + M_rot00_sph = rotate_Matrix_from_z_to_spherical(M, 0, 0) + print(f"Rotated M with theta=0, phi=0 compared with M:\n {M_rot00-M}") + print(f"Rotated M with theta=0, phi=0 compared with M:\n {M_rot00_sph-M}") + + +def test_rotate_spinor_oneblock(): + M = np.array([[1.1, 0], [0, 0.9]]) + print(np.array(pauli_block_all(M)).ravel()) + print("Rotate by pi/2, pi/2 (z to y)") + Mnew = rotate_spinor_matrix_einsum(M, np.pi / 2, np.pi / 2) + print(np.array(pauli_block_all(Mnew)).ravel()) + + print("Rotate by pi/2, 0 (z to x)") + Mnew = rotate_spinor_matrix_kron(M, np.pi / 2, 0) + + print(np.array(pauli_block_all(Mnew)).ravel()) + + print(Mnew) + + +def rotate_Matrix_from_z_to_spherical(M, theta, phi, normalize=True): + """ + Given a spinor matrix M, rotate it from z-axis to spherical coordinates + """ + # axis = spherical_to_cartesian(theta, phi, normalize) + # return rotate_Matrix_from_z_to_axis(M, axis, normalize) + return rotate_spinor_matrix_einsum(M, theta, phi) + + +def test_rotate_Matrix_from_z_to_spherical(): + M_re = np.random.rand(2, 2) + M_im = 
np.random.rand(2, 2) + M = M_re + 1j * M_im + print(M) + M_rot = rotate_Matrix_from_z_to_spherical(M, 0, 0) + print(M_rot) + + def test_rotate_Matrix_from_z_to_axis(): M = np.array([[1.1, 0], [0, 0.9]]) print(pauli_block_all(M)) @@ -32,4 +268,8 @@ def test_rotate_Matrix_from_z_to_axis(): if __name__ == "__main__": - test_rotate_Matrix_from_z_to_axis() + # test_rotate_Matrix_from_z_to_axis() + # test_rotate_Matrix_from_z_to_spherical() + test_rotate_spinor_M() + # test_rotate_spinor_oneblock() + pass diff --git a/TB2J/mycfr.py b/TB2J/mycfr.py new file mode 100644 index 0000000..50ab5d8 --- /dev/null +++ b/TB2J/mycfr.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# File: mycfr.py +# Time: 2017/12/10 19:29:43 +""" +Continued fraction representation. +""" + +import numpy as np +from scipy.linalg import eig + +kb = 8.61733e-5 # Boltzmann constant in eV + + +class CFR: + """ + Integration with the continued fraction representation. + """ + + def __init__(self, nz=50, T=60): + self.nz = nz + self.T = 600 + self.beta = 1 / (kb * self.T) + self.Rinf = 1e10 + if nz <= 0: + raise ValueError("nz should be larger than 0.") + else: + self.prepare_poles() + + def prepare_poles(self): + ##b_mat = [1 / (2.0 * np.sqrt((2 * (j + 1) - 1) * (2 * (j + 1) + 1)) / (kb * self.#T)) for j in range(0, self.nz- 1)] + jmat = np.arange(0, self.nz - 1) + b_mat = 1 / (2.0 * np.sqrt((2 * (jmat + 1) - 1) * (2 * (jmat + 1) + 1))) + b_mat = np.diag(b_mat, -1) + np.diag(b_mat, 1) + self.poles, residules = eig(b_mat) + + residules = 0.25 * np.abs(residules[0, :]) ** 2 / self.poles**2 + # residules = 0.25 * np.diag(residules) ** 2 / self.poles**2 + + # self.weights = np.where( + # #np.real(self.poles) > 0, 4.0j / self.beta * residules, 0.0 + # #np.real(self.poles) > 0, 2.0j / self.beta * residules, 2.0j / self.beta * residules + # np.real(self.poles) > 0, 2.0j / self.beta * residules, 0.0 + # ) + + # self.path = 1j / self.poles * kb * self.T + + self.path = [] + self.weights = [] + for p, r in zip(self.poles, residules): + if p > 0: + self.path.append(1j / p * kb * self.T) + w = 2.0j / self.beta * r + self.weights.append(w) + self.path.append(-1j / p * kb * self.T) + self.weights.append(w) + + self.path = np.array(self.path) + self.weights = np.array(self.weights) + + # from J. Phys. Soc. Jpn. 88, 114706 (2019) + # A_mat = -1/2 *np.diag(1, -1) + np.diag(1, 1) + # B_mat = np.diag([2*i-1 for i in range(1, self.nz)]) + # eigp, eigv = eig(A_mat, B_mat) + # zp = 1j / eigp * kb * self.T + # Rp = 0.25 * np.diag(eigv)**2 * zp **2 + + # print the poles and the weights + for i in range(len(self.poles)): + print("Pole: ", self.poles[i], "Weight: ", self.weights[i]) + + # add a point to the poles: 1e10j + self.path = np.concatenate((self.path, [self.Rinf * 1j])) + # self.path = np.concatenate((self.path, [0.0])) + self.npoles = len(self.poles) + + # add weights for the point 1e10j + # self.weights = np.concatenate((self.weights, [-self.Rinf])) + self.weights = np.concatenate((self.weights, [00.0])) + # zeros moment is 1j * R * test_gf(1j * R), but the real part of it will be taken. In contrast to the other part, where the imaginary part is taken. + + def integrate_values(self, gf_vals, imag=False): + ret = 0 + for i in range(self.npoles + 1): + ret += self.weights[i] * gf_vals[i] + ret *= ( + -np.pi / 2 + ) # This is to be compatible with the integration of contour, where /-np.pi is used after the integration. And the factor of 2 is because the Fermi function here is 2-fold degenerate. 
+ if imag: + return np.imag(ret) + return ret + + def integrate_func(self, gf, ef=0): + """ + Integration with the continued fraction representation. + + :param gf: Green's function + :param ef: Fermi energy + :return: integration result + """ + path = self.path + gf_vals = gf(path, ef=ef) + return self.integrate_values(gf_vals) + + +def test_cfr(): + cfr = CFR(nz=100) + + def test_gf(z, ef=0.1): + return 1 / (z - 3 + ef) + + r = cfr.integrate_func(test_gf, ef=2) + r = -np.imag(r) / np.pi * 2 + print(r) + return r + + +if __name__ == "__main__": + test_cfr() diff --git a/TB2J/orbital_magmom.py b/TB2J/orbital_magmom.py new file mode 100644 index 0000000..261c688 --- /dev/null +++ b/TB2J/orbital_magmom.py @@ -0,0 +1,36 @@ +""" +utilities to handle orbital magnetic moments +""" + +import numpy as np + + +def complex_spherical_harmonic_to_real_spherical_harmonic(l=1): + """ + matrix to convert the complex spherical harmonics to real spherical harmonics + """ + MCR = np.zeros((2 * l + 1, 2 * l + 1), dtype=np.complex128) + MCR[0 + l, 0 + l] = 1.0 + for m in range(1, l + 1): + mi = m + l + mpi = -m + l + MCR[mi, mi] = (-1) ** m / 2 + MCR[mi, mpi] = 1.0 / 2 + MCR[mpi, mi] = -1j * (-1) ** m / 2 + MCR[mpi, mpi] = 1j / 2 + + return MCR + + +def test_complex_spherical_harmonic_to_real_spherical_harmonic(): + """ + test the conversion matrix + """ + l = 3 + MCR = complex_spherical_harmonic_to_real_spherical_harmonic(l) + # print(MCR*np.sqrt(2)) + print(MCR * 2) + + +if __name__ == "__main__": + test_complex_spherical_harmonic_to_real_spherical_harmonic() diff --git a/TB2J/orbmap.py b/TB2J/orbmap.py index acc50b4..ceb97cb 100644 --- a/TB2J/orbmap.py +++ b/TB2J/orbmap.py @@ -1,5 +1,6 @@ import re from collections import defaultdict + import numpy as np @@ -7,30 +8,45 @@ def split_orb_name(name): """ split name to : n, l, label """ - m = re.findall(r"(\d[a-z\d\-]*)(.*)", name) + m = re.findall(r"([a-z\d\-\^\*]*)(.*)", name) m = m[0] return m[0], m[1] def map_orbs_matrix(orblist, spinor=False, include_only=None): + """ + map the orbitals to a matrix + Method: + 1. split the orbital name to n, l, label + 2. group the orbitals by n, l + 3. create a matrix with 1 for each orbital in the group + 4. return the matrix and the group names + """ + if spinor: orblist = orblist[::2] norb = len(orblist) + # print("orblist: ", orblist) ss = [split_orb_name(orb) for orb in orblist] orbdict = dict(zip(ss, range(norb))) reduced_orbdict = defaultdict(lambda: []) + # print(f"Orbital dictionary: {orbdict}") + # print("include_only: ", include_only) + if include_only is None: for key, val in orbdict.items(): reduced_orbdict[key[0]].append(val) else: for key, val in orbdict.items(): - if key[0][:2] in include_only: + if key[0][:2] in include_only or key[0][:1] in include_only: + # [:2] for 3d, 4d, 5d, etc. and [:1] for s, p, d, etc reduced_orbdict[key[0]].append(val) + # print(f"reduced_orbdict: {reduced_orbdict}") reduced_orbs = tuple(reduced_orbdict.keys()) ngroup = len(reduced_orbdict) mmat = np.zeros((norb, ngroup), dtype=int) diff --git a/TB2J/pauli.py b/TB2J/pauli.py index ec34029..53bfa1f 100644 --- a/TB2J/pauli.py +++ b/TB2J/pauli.py @@ -143,11 +143,47 @@ def pauli_block_all(M): return MI, Mx, My, Mz -def gather_pauli_blocks(MI, Mx, My, Mz): +def gather_pauli_blocks(MI, Mx, My, Mz, coeffs=[1.0, 1.0, 1.0, 1.0]): """ Gather the I, x, y, z component of a matrix.
""" - return np.kron(MI, s0) + np.kron(Mx, s1) + np.kron(My, s2) + np.kron(Mz, s3) + cI, cx, cy, cz = coeffs + return ( + cI * np.kron(MI, s0) + + cx * np.kron(Mx, s1) + + cy * np.kron(My, s2) + + cz * np.kron(Mz, s3) + ) + + +def pauli_part(M, coeffs=[1.0, 1.0, 1.0, 1.0]): + """ + Get the I, x, y, z part of a matrix. + """ + MI, Mx, My, Mz = pauli_block_all(M) + return gather_pauli_blocks(MI, Mx, My, Mz, coeffs=coeffs) + + +def chargepart(M): + """ + Get the charge part of a matrix. + """ + MI = (M[::2, ::2] + M[1::2, 1::2]) / 2.0 + Mcopy = np.zeros_like(M) + Mcopy[::2, ::2] = MI + Mcopy[1::2, 1::2] = MI + return Mcopy + + +def spinpart(M): + """ + Get the spin part of a matrix. + """ + MI = (M[::2, ::2] + M[1::2, 1::2]) / 2.0 + Mcopy = M.copy() + Mcopy[::2, ::2] -= MI + Mcopy[1::2, 1::2] -= MI + return Mcopy def test_gather_pauli_blocks(): diff --git a/TB2J/symmetrize_J.py b/TB2J/symmetrize_J.py new file mode 100644 index 0000000..8ca5b7c --- /dev/null +++ b/TB2J/symmetrize_J.py @@ -0,0 +1,147 @@ +import copy +from pathlib import Path + +import numpy as np +from sympair import SymmetryPairFinder + +from TB2J.io_exchange import SpinIO +from TB2J.versioninfo import print_license + + +class TB2JSymmetrizer: + def __init__(self, exc, symprec=1e-8, verbose=True, Jonly=False): + # list of pairs with the index of atoms + ijRs = exc.ijR_list_index_atom() + finder = SymmetryPairFinder(atoms=exc.atoms, pairs=ijRs, symprec=symprec) + self.verbose = verbose + self.Jonly = Jonly + + if verbose: + print("=" * 30) + print_license() + print("-" * 30) + print( + "WARNING: The symmetry detection is based on the crystal symmetry, not the magnetic symmetry. Make sure if this is what you want." + ) + print("-" * 30) + if exc.has_dmi: + print( + "WARNING: Currently only the isotropic exchange is symmetrized. Symmetrization of DMI and anisotropic exchange are not yet implemented." + ) + + print(f"Finding crystal symmetry with symprec of {symprec} Angstrom.") + print("Symmetry found:") + print(finder.spacegroup) + print("-" * 30) + self.pgdict = finder.get_symmetry_pair_group_dict() + self.exc = exc + self.new_exc = copy.deepcopy(exc) + self.Jonly = Jonly + + def print_license(self): + print_license() + + def symmetrize_J(self): + """ + Symmetrize the exchange parameters J. 
+        """
+        symJdict = {}
+        # Jdict = self.exc.exchange_Jdict
+        # ngroup = self.pgdict
+        for pairgroup in self.pgdict.groups:
+            ijRs = pairgroup.get_all_ijR()
+            ijRs_spin = [self.exc.ijR_index_atom_to_spin(*ijR) for ijR in ijRs]
+            Js = [self.exc.get_J(*ijR_spin) for ijR_spin in ijRs_spin]
+            Javg = np.average(Js)
+            for i, j, R in ijRs_spin:
+                symJdict[(R, i, j)] = Javg
+        self.new_exc.exchange_Jdict = symJdict
+        if self.Jonly:
+            self.new_exc.has_dmi = False
+            self.new_exc.dmi_ddict = None
+            self.new_exc.has_bilinear = False
+            self.new_exc.Jani_dict = None
+            self.has_uniaxial_anisotropy = False
+            self.k1 = None
+            self.k1dir = None
+
+    def output(self, path="TB2J_symmetrized"):
+        if path is None:
+            path = Path(".")
+        self.new_exc.write_all(path=path)
+
+    def run(self, path=None):
+        print("** Symmetrizing exchange parameters.")
+        self.symmetrize_J()
+        print("** Outputting the symmetrized exchange parameters.")
+        print(f"** Output path: {path}.")
+        self.output(path=path)
+        print("** Finished.")
+
+
+def symmetrize_J(
+    exc=None,
+    path=None,
+    fname="TB2J.pickle",
+    symprec=1e-5,
+    output_path="TB2J_symmetrized",
+    Jonly=False,
+):
+    """
+    Symmetrize the exchange parameters.
+
+    Parameters:
+        exc: the exchange object (SpinIO). If None, it is loaded from path/fname.
+    """
+    if exc is None:
+        exc = SpinIO.load_pickle(path=path, fname=fname)
+    symmetrizer = TB2JSymmetrizer(exc, symprec=symprec, Jonly=Jonly)
+    symmetrizer.run(path=output_path)
+
+
+def symmetrize_J_cli():
+    from argparse import ArgumentParser
+
+    parser = ArgumentParser(
+        description="Symmetrize exchange parameters. Currently, it takes the crystal symmetry into account but not the magnetic moments."
+    )
+    parser.add_argument(
+        "-i",
+        "--inpath",
+        default=None,
+        help="input path to the exchange parameters",
+    )
+    parser.add_argument(
+        "-o",
+        "--outpath",
+        default="TB2J_results_symmetrized",
+        help="output path to the symmetrized exchange parameters",
+    )
+    parser.add_argument(
+        "-s",
+        "--symprec",
+        type=float,
+        default=1e-5,
+        help="precision for symmetry detection. The default is 1e-5 Angstrom.",
+    )
+
+    parser.add_argument(
+        "--Jonly",
+        action="store_true",
+        help="keep only the symmetrized isotropic exchange and discard the DMI and anisotropic exchange",
+        default=False,
+    )
+
+    args = parser.parse_args()
+    if args.inpath is None:
+        parser.print_help()
+        raise ValueError("Please provide the input path to the exchange parameters.")
+    symmetrize_J(
+        path=args.inpath,
+        output_path=args.outpath,
+        symprec=args.symprec,
+        Jonly=args.Jonly,
+    )
+
+
+if __name__ == "__main__":
+    symmetrize_J_cli()
diff --git a/TB2J/thetaphi.py b/TB2J/thetaphi.py
new file mode 100644
index 0000000..291cb89
--- /dev/null
+++ b/TB2J/thetaphi.py
@@ -0,0 +1,16 @@
+import numpy as np
+
+
+def theta_phi_even_spaced(n):
+    """
+    Return n (theta, phi) angle pairs, with phi evenly spaced in [0, 2*pi] and theta in the range [0, pi].
+    """
+    phis = []
+    thetas = []
+    for i in range(n):
+        phi = 2 * np.pi * i / n
+        phis.append(phi)
+        r = np.sin(np.pi * i / n)
+        theta = np.arccos(1 - 2 * r)
+        thetas.append(theta)
+    return thetas, phis
diff --git a/TB2J/versioninfo.py b/TB2J/versioninfo.py
index cd5b0e9..14a093d 100644
--- a/TB2J/versioninfo.py
+++ b/TB2J/versioninfo.py
@@ -1,6 +1,7 @@
-import TB2J
 from datetime import datetime
 
+import TB2J
+
 
 def print_license():
     print(
diff --git a/docs/src/ReleaseNotes.md b/docs/src/ReleaseNotes.md
index ff6c630..59e3c8e 100644
--- a/docs/src/ReleaseNotes.md
+++ b/docs/src/ReleaseNotes.md
@@ -1,5 +1,27 @@
 ## Release Notes
 ------------------------------------------------------------------------
+
+#### Current development version (v1.0.0-alpha)
+These are the new features and changes not yet included in the official release.
+
+- Computing MAE and single-ion anisotropy is now possible with the ABACUS and SIESTA interfaces.
+This currently requires a non-official SIESTA branch which can separate the spin-orbit coupling from the exchange-correlation part of the Hamiltonian (see this MR: https://gitlab.com/siesta-project/siesta/-/merge\_requests/309).
+
+- Full implementation of the magnon band structure from linear spin wave theory. (Thanks to Andres Tellez Mora and Aldo Romero!)
+
+- An improved downfolding method, which implements the ligand correction to the exchange based on Wannier functions. This requires the updated version of LaWaF (https://github.com/mailhexu/lawaf) and the updated TB2J\_downfold.py script.
+
+- A major refactoring of the interfaces to the DFT codes. The parsing of the electron Hamiltonian from the DFT codes is now done in a separate Python package called HamiltonIO (github.com/mailhexu/HamiltonIO). This package is used by TB2J but is kept general so that it can also be used by other packages.
+
+
+#### v0.11.0 October 10, 2024
+- Allow symmetrizing the exchange according to the crystal symmetry with the TB2J\_symmetrize.py script. Note that the spin order is not considered in this symmetrization process.
+
+#### v0.10.0 September 1, 2024
+- Improved orbital decomposition in the ABACUS interface.
+- Allow computing anisotropic J and DMI without requiring three or more calculations within the ABACUS and SIESTA interfaces.
+
+
 #### v0.9.0 March 22, 2024
 Improved merge method for anisotropic exchange and DMI. (thanks to Andres Tellez Mora!)
diff --git a/docs/src/symmetry.md b/docs/src/symmetry.md
new file mode 100644
index 0000000..6f3221f
--- /dev/null
+++ b/docs/src/symmetry.md
@@ -0,0 +1,42 @@
+### Symmetrization of the exchange parameters
+
+The exchange parameters obtained from the output of TB2J may not preserve the symmetry of the crystal. This can be attributed to two reasons:
+
+1. The magnetic state breaks the symmetry of the crystal. As TB2J perturbs this magnetic state, the J values should reflect the symmetry of the magnetic state.
+
+2. Numerical noise can be present in the calculation, either during the DFT stage, the Wannierization procedure, or the post-processing within TB2J. This numerical noise should ideally be very small. If it is not, the calculation procedure should be optimized.
+
+In some cases, it is necessary for the exchange parameters to strictly adhere to the symmetry of the crystal structure. To achieve this, you can use the script `TB2J_symmetrize.py` to symmetrize the exchange parameters.
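+
+The same symmetrization can also be called from Python through the `symmetrize_J` helper added in `TB2J/symmetrize_J.py` (a minimal sketch; the paths below are only examples):
+
+```python
+from TB2J.symmetrize_J import symmetrize_J
+
+# Load TB2J.pickle from an existing TB2J_results directory, average the
+# isotropic J over symmetry-equivalent pairs, and write the result to a
+# new output directory.
+symmetrize_J(
+    path="TB2J_results",
+    fname="TB2J.pickle",
+    symprec=1e-4,
+    output_path="TB2J_symmetrized",
+)
+```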
+
+**Warning:** Please note that the current version of the script only considers the symmetry of the crystal and does not take the magnetic moments into account.
+
+**Warning:** Additionally, only the isotropic exchange is symmetrized in this version. The symmetrization of the DMI and anisotropic exchange will be implemented in a future version.
+
+The `TB2J_symmetrize.py` script utilizes the symmetry of the crystal to identify symmetrically equivalent atom pairs and the corresponding symmetry operators. It then averages the J values over these symmetrically equivalent atom pairs.
+
+Here is the usage information for the script:
+
+```
+usage: TB2J_symmetrize.py [-h] [-i INPATH] [-o OUTPATH] [-s SYMPREC]
+
+Symmetrize exchange parameters. Currently, it takes the crystal symmetry into account but not the magnetic moments.
+Also, only the isotropic exchange is symmetrized in this version. The symmetrization of the DMI and anisotropic exchange will be implemented in a future version.
+
+options:
+  -h, --help            show this help message and exit
+  -i INPATH, --inpath INPATH
+                        input path to the exchange parameters
+  -o OUTPATH, --outpath OUTPATH
+                        output path to the symmetrized exchange parameters
+  -s SYMPREC, --symprec SYMPREC
+                        precision for symmetry detection. The default value is 1e-5 Angstrom
+```
+
+An example:
+
+```bash
+TB2J_symmetrize.py -i TB2J_results -o TB2J_symmetrized -s 1e-4
+```
+
+This reads the data from TB2J\_results and writes the symmetrized exchange parameters to a new directory, TB2J\_symmetrized. The precision used for the symmetry detection is 1e-4 Angstrom.
+
diff --git a/docs/src/tutorial.rst b/docs/src/tutorial.rst
index f6cf847..7744760 100644
--- a/docs/src/tutorial.rst
+++ b/docs/src/tutorial.rst
@@ -13,5 +13,6 @@ Tutorial
    rotate_and_merge.rst
    downfold.md
    orbital_contribution.md
+   symmetry.md
    output.rst
diff --git a/requirements.txt b/requirements.txt
index 76a75d7..acbb2cb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,4 +6,5 @@ ase>=3.19
 tqdm>=4.42.0
 pathos
 packaging>=20.0
+sympair>=0.1.0
 pre-commit
diff --git a/scripts/abacus2J.py b/scripts/abacus2J.py
index 4cdac46..bce0f5f 100755
--- a/scripts/abacus2J.py
+++ b/scripts/abacus2J.py
@@ -1,8 +1,9 @@
 #!/usr/bin/env python3
-from TB2J.abacus.gen_exchange_abacus import gen_exchange_abacus
-from TB2J.versioninfo import print_license
-import sys
 import argparse
+import sys
+
+from TB2J.interfaces import gen_exchange_abacus
+from TB2J.versioninfo import print_license
 
 
 def run_abacus2J():
@@ -80,6 +81,7 @@ def run_abacus2J():
     )
 
     parser.add_argument(
+        "--nproc",
         "--np",
         help="number of cpu cores to use in parallel, default: 1",
         default=1,
@@ -120,28 +122,21 @@ def run_abacus2J():
        print("Please input the magnetic elements, e.g.
--elements Fe Ni") sys.exit() - include_orbs = {} - for element in args.elements: - if "_" in element: - elem = element.split("_")[0] - orb = element.split("_")[1:] - include_orbs[elem] = orb - else: - include_orbs[element] = None + # include_orbs = {} gen_exchange_abacus( path=args.path, suffix=args.suffix, kmesh=args.kmesh, - magnetic_elements=list(include_orbs.keys()), - include_orbs=include_orbs, + magnetic_elements=args.elements, + include_orbs={}, Rcut=args.rcut, emin=args.emin, nz=args.nz, description=args.description, output_path=args.output_path, use_cache=args.use_cache, - np=args.np, + nproc=args.nproc, exclude_orbs=args.exclude_orbs, orb_decomposition=args.orb_decomposition, ) diff --git a/scripts/siesta2J.py b/scripts/siesta2J.py index e028e19..e9dc399 100755 --- a/scripts/siesta2J.py +++ b/scripts/siesta2J.py @@ -1,8 +1,9 @@ #!/usr/bin/env python3 -from TB2J.manager import gen_exchange_siesta -from TB2J.versioninfo import print_license -import sys import argparse +import sys + +from TB2J.interfaces import gen_exchange_siesta +from TB2J.versioninfo import print_license def run_siesta2J(): @@ -107,26 +108,42 @@ def run_siesta2J(): default="TB2J_results", ) + parser.add_argument( + "--split_soc", + help="whether the SOC part of the Hamiltonian can be read from the output of siesta. Default: False", + action="store_true", + default=False, + ) + + parser.add_argument( + "--orth", + help="whether to use orthogonalization before the diagonization of the electron Hamiltonian. Default: False", + action="store_true", + default=False, + ) + args = parser.parse_args() if args.elements is None: print("Please input the magnetic elements, e.g. --elements Fe Ni") sys.exit() - include_orbs = {} - for element in args.elements: - if "_" in element: - elem = element.split("_")[0] - orb = element.split("_")[1:] - include_orbs[elem] = orb - else: - include_orbs[element] = None + # include_orbs = {} + # for element in args.elements: + # if "_" in element: + # elem = element.split("_")[0] + # orb = element.split("_")[1:] + # include_orbs[elem] = orb + # else: + # include_orbs[element] = None gen_exchange_siesta( fdf_fname=args.fdf_fname, kmesh=args.kmesh, - magnetic_elements=list(include_orbs.keys()), - include_orbs=include_orbs, + # magnetic_elements=list(include_orbs.keys()), + # include_orbs=include_orbs, + magnetic_elements=args.elements, + include_orbs={}, Rcut=args.rcut, emin=args.emin, emax=args.emax, @@ -134,9 +151,11 @@ def run_siesta2J(): description=args.description, output_path=args.output_path, use_cache=args.use_cache, - np=args.np, + nproc=args.np, exclude_orbs=args.exclude_orbs, orb_decomposition=args.orb_decomposition, + read_H_soc=args.split_soc, + orth=args.orth, ) diff --git a/scripts/wann2J.py b/scripts/wann2J.py index bc9ba22..2aa46ec 100755 --- a/scripts/wann2J.py +++ b/scripts/wann2J.py @@ -1,7 +1,9 @@ #!/usr/bin/env python3 import argparse import sys -from TB2J.manager import gen_exchange + +from TB2J.exchange_params import add_exchange_args_to_parser +from TB2J.interfaces import gen_exchange from TB2J.versioninfo import print_license @@ -34,114 +36,12 @@ def run_wann2J(): default="wannier90.dn", type=str, ) - parser.add_argument( - "--elements", - help="elements to be considered in Heisenberg model", - default=None, - type=str, - nargs="*", - ) parser.add_argument( "--groupby", help="In the spinor case, the order of the orbitals have two conventions: 1: group by spin (orb1_up, orb2_up,... orb1_down, ...), 2,group by orbital (orb1_up, orb1_down, orb2_up, ...,). 
Use 'spin' in the former case and 'orbital' in the latter case. The default is spin.", default="spin", type=str, ) - parser.add_argument( - "--write_dm", - help="whether to write density matrix", - action="store_true", - default=False, - ) - - parser.add_argument( - "--rcut", - help="cutoff of spin pair distance. The default is to calculate all commensurate R point to the k mesh.", - default=None, - type=float, - ) - parser.add_argument("--efermi", help="Fermi energy in eV", default=None, type=float) - parser.add_argument( - "--kmesh", - help="kmesh in the format of kx ky kz", - type=int, - nargs="*", - default=[5, 5, 5], - ) - parser.add_argument( - "--emin", - help="energy minimum below efermi, default -14 eV", - type=float, - default=-14.0, - ) - parser.add_argument( - "--emax", - help="energy maximum above efermi, default 0.0 eV", - type=float, - default=0.0, - ) - parser.add_argument( - "--nz", - help="number of steps for semicircle contour, default: 100", - default=100, - type=int, - ) - parser.add_argument( - "--cutoff", - help="The minimum of J amplitude to write, (in eV), default is 1e-5 eV", - default=1e-5, - type=float, - ) - parser.add_argument( - "--exclude_orbs", - help="the indices of wannier functions to be excluded from magnetic site. counting start from 0", - default=[], - type=int, - nargs="+", - ) - - parser.add_argument( - "--np", - help="number of cpu cores to use in parallel, default: 1", - default=1, - type=int, - ) - - parser.add_argument( - "--use_cache", - help="whether to use disk file for temporary storing wavefunctions and hamiltonian to reduce memory usage. Default: False", - action="store_true", - default=False, - ) - - parser.add_argument( - "--description", - help="add description of the calculatiion to the xml file. Essential information, like the xc functional, U values, magnetic state should be given.", - type=str, - default="Calculated with TB2J.", - ) - - parser.add_argument( - "--spinor", - action="store_true", - help="Whether to use spinor wannier function.", - default=False, - ) - - parser.add_argument( - "--orb_decomposition", - default=False, - action="store_true", - help="whether to do orbital decomposition in the non-collinear mode.", - ) - - parser.add_argument( - "--output_path", - help="The path of the output directory, default is TB2J_results", - type=str, - default="TB2J_results", - ) - parser.add_argument( "--wannier_type", help="The type of Wannier function, either Wannier90 or banddownfolder", @@ -154,6 +54,8 @@ def run_wann2J(): # help="Whether to calculate J in qspace first and transform to real space.", # default=False) + add_exchange_args_to_parser(parser) + args = parser.parse_args() if args.efermi is None: @@ -179,7 +81,7 @@ def run_wann2J(): emax=args.emax, nz=args.nz, use_cache=args.use_cache, - np=args.np, + nproc=args.np, description=args.description, output_path=args.output_path, exclude_orbs=args.exclude_orbs, diff --git a/setup.py b/setup.py index 6a5e03f..e15a3e3 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ #!/usr/bin/env python -from setuptools import setup, find_packages +from setuptools import find_packages, setup -__version__ = "0.9.0.3" +__version__ = "0.9.9_rc6" long_description = """TB2J is a Python package aimed to compute automatically the magnetic interactions (superexchange and Dzyaloshinskii-Moriya) between atoms of magnetic crystals from DFT Hamiltonian based on Wannier functions or Linear combination of atomic orbitals. 
It uses the Green's function method and takes the local rigid spin rotation as a perturbation.
The package can take the output from Wannier90, which is interfaced with many density functional theory codes, or from codes based on localised orbitals.
Minimal user input is needed, which allows for easy integration into high-throughput workflows.
"""
@@ -26,15 +26,23 @@
         "scripts/TB2J_downfold.py",
         "scripts/TB2J_eigen.py",
     ],
+    entry_points={
+        "console_scripts": [
+            "TB2J_symmetrize.py=TB2J.symmetrize_J:symmetrize_J_cli",
+            "lawaf2J.py=TB2J.interfaces.lawaf_interface:lawaf2J_cli",
+        ]
+    },
     install_requires=[
-        "numpy>1.16.5",
+        "numpy<2.0",
         "scipy",
         "matplotlib",
         "ase>=3.19",
         "tqdm",
         "pathos",
         "packaging>=20.0",
+        "HamiltonIO>=0.1.9",
         "pre-commit",
+        "sympair>0.1.0",
     ],
     classifiers=[
         "Development Status :: 3 - Alpha",
diff --git a/upload_to_pip.sh b/upload_to_pip.sh
index 6b1f1d5..2427a0c 100755
--- a/upload_to_pip.sh
+++ b/upload_to_pip.sh
@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
 rm ./dist/*
-python3.11 setup.py sdist bdist_wheel
-python3.11 -m twine upload --repository pypi dist/* --verbose
+#python3 setup.py sdist bdist_wheel
+python -m build
+python -m twine upload --repository pypi dist/* --verbose
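
A quick self-check for the new helpers added to `TB2J/pauli.py` above: `chargepart` and `spinpart` split a spinor matrix (with interleaved spin components, as assumed by the `::2` indexing in that module) into a spin-averaged part and the remainder, and the two parts sum back to the original matrix. A minimal sketch, assuming the `TB2J.pauli` module from this change is importable; the random test matrix is only for illustration:

```python
import numpy as np

from TB2J.pauli import chargepart, spinpart

# 4x4 spinor matrix for two orbitals, spin components interleaved.
rng = np.random.default_rng(0)
M = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))

Mc = chargepart(M)  # spin-diagonal blocks replaced by their average, spin-off-diagonal blocks zeroed
Ms = spinpart(M)  # spin-off-diagonal blocks plus the deviation of the spin-diagonal blocks from their average

# The two parts add back up to the original matrix.
assert np.allclose(Mc + Ms, M)
```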