Merge pull request #455 from Cytnx-dev/fix-macinstall
Fix MacOS install
jeffry1829 authored Aug 30, 2024

Verified: this commit was created on GitHub.com and signed with GitHub’s verified signature.
Commit 5d37d81 (2 parents: d43dedd + f182b32)
Showing 2 changed files with 12 additions and 10 deletions.
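
Both diffs follow a single pattern: indices passed to Tensor::at() and operator[] in braced lists are now explicitly cast to the index type. A plausible reading of why this fixes the macOS install (an inference, not stated in the commit): the braced index lists narrow a signed value to an unsigned one, which Clang, and therefore Apple Clang on macOS, rejects as a hard error, while GCC on Linux only warns. Below is a minimal sketch of that failure mode; the toy at() accessor and the stand-in typedef are illustrative assumptions, not cytnx's actual API.

  // Minimal sketch of the suspected failure mode (assumption: cytnx's
  // Tensor::at() receives its indices as a braced list of cytnx_uint64,
  // and the macOS build fails on Clang's narrowing diagnostic).
  #include <cstdint>
  #include <vector>

  using cytnx_uint64 = std::uint64_t;  // stand-in for the cytnx typedef

  // Toy accessor mimicking an at({row, col}) call signature.
  double& at(std::vector<double>& buf, const std::vector<cytnx_uint64>& idx) {
    return buf[idx[0] * 2 + idx[1]];
  }

  int main() {
    std::vector<double> buf(4, 0.0);
    for (std::int64_t i = 0; i < 2; ++i) {
      // at(buf, {i, 0}) = 1.0;  // Clang: error, non-constant expression
      //                         // cannot be narrowed from 'std::int64_t'
      //                         // to 'cytnx_uint64'; GCC merely warns.
      at(buf, {static_cast<cytnx_uint64>(i), 0}) = 1.0;  // OK: explicit cast
    }
    return 0;
  }

Casting each signed index to the expected unsigned type, as this commit does throughout, makes the conversion explicit and compiles cleanly under both compilers.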
src/linalg/Arnoldi.cpp: 7 changes (4 additions, 3 deletions)
@@ -111,7 +111,7 @@ namespace cytnx {
       auto maxIdx = max_indices[ik];
       auto eigTens = zeros(qs[0].shape(), Type.ComplexDouble);
       for (cytnx_int64 i = 0; i < krydim; ++i) {
-        eigTens += P_inv[{i, maxIdx}] * qs[i];
+        eigTens += P_inv[{i, static_cast<cytnx_int64>(maxIdx)}] * qs[i];
       }
       eigTens /= eigTens.Norm().item();
       eigTens_s[ik] = eigTens;
@@ -127,9 +127,10 @@ namespace cytnx {
       auto eigTens_s = std::vector<UniTensor>(k, UniTensor());
       for (cytnx_int32 ik = 0; ik < k; ++ik) {
         auto maxIdx = max_indices[ik];
-        auto eigTens = P_inv.at({0, maxIdx}) * qs[0];
+        auto eigTens = P_inv.at({0, static_cast<cytnx_uint64>(maxIdx)}) * qs[0];
         for (cytnx_int64 i = 1; i < krydim; ++i) {
-          eigTens += P_inv.at({i, maxIdx}) * qs[i];
+          eigTens +=
+            P_inv.at({static_cast<cytnx_uint64>(i), static_cast<cytnx_uint64>(maxIdx)}) * qs[i];
         }
         eigTens /= eigTens.Norm().item();
         eigTens_s[ik] = eigTens;
src/linalg/Lanczos_Exp.cpp: 15 changes (8 additions, 7 deletions)
@@ -127,7 +127,7 @@ namespace cytnx {
     double eps1 = std::exp(-(k * std::log(k) + std::log(1.0 + Op_apprx_norm)));

     std::vector<UniTensor> vs;
-    Tensor as = zeros({k + 1, k + 1}, Hop->dtype(), Hop->device());
+    Tensor as = zeros({(cytnx_uint64)k + 1, (cytnx_uint64)k + 1}, Hop->dtype(), Hop->device());

     // Initialized v0 = v
     auto v = Tin;
@@ -149,17 +149,17 @@ namespace cytnx {
// For j = 0 to i
for (int j = 0; j <= i; ++j) {
// Let a[j,i] = <v[j], w[i]>
as.at({j, i}) = _Dot(Vs.at(j), w);
as.at({(cytnx_uint64)j, (cytnx_uint64)i}) = _Dot(Vs.at(j), w);
}
// Define wp[i] = w[i] - \sum_{j=0}^{j=i} {a[j,i]v[j]}
auto wp = w;

Check warning on line 155 in src/linalg/Lanczos_Exp.cpp

Codecov / codecov/patch

src/linalg/Lanczos_Exp.cpp#L155

Added line #L155 was not covered by tests
for (int j = 0; j <= i; j++) {
wp -= as.at({j, i}) * Vs.at(j);
wp -= as.at({(cytnx_uint64)j, (cytnx_uint64)i}) * Vs.at(j);
}
// Let a[i+1, i] = |wp[i]|, v[i+1]=wp[i] / a[i+1, i]
auto b = std::sqrt(double(_Dot(wp, wp).real()));
if (i < k) {
as.at({i + 1, i}) = b;
as.at({(cytnx_uint64)i + 1, (cytnx_uint64)i}) = b;
v = wp / b;
Vk.append(v.get_block_());
Vs.push_back(v);
@@ -282,18 +282,19 @@ namespace cytnx {
       }
       Vk.append(v.get_block_().contiguous());
       Vs.push_back(v);
-      Hp.at({i, i - 1}) = Hp.at({i - 1, i}) = beta;
+      Hp.at({(cytnx_uint64)i, (cytnx_uint64)i - 1}) =
+        Hp.at({(cytnx_uint64)i - 1, (cytnx_uint64)i}) = beta;
       wp = (Hop->matvec(v)).relabels_(v.labels());
       alpha = _Dot(wp, v);
-      Hp.at({i, i}) = alpha;
+      Hp.at({(cytnx_uint64)i, (cytnx_uint64)i}) = alpha;
       w = (wp - alpha * v - beta * v_old).relabels_(v.labels());

       // Converge check
       Hp_sub = _resize_mat(Hp, i + 1, i + 1);
       // We use ExpM since H*tau may not be Hermitian if tau is complex.
       B_mat = linalg::ExpM(Hp_sub * tau);
       // Set the error as the element of bottom left of the exp(H_sub*tau)
-      auto error = abs(B_mat.at({i, 0}));
+      auto error = abs(B_mat.at({(cytnx_uint64)i, 0}));
       if (error < CvgCrit || i == imp_maxiter - 1) {
         if (i == imp_maxiter - 1 && error > CvgCrit) {
           cytnx_warning_msg(
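
A stylistic note on the two files: the Arnoldi.cpp hunks use static_cast<cytnx_uint64>(...) while the Lanczos_Exp.cpp hunks use C-style casts such as (cytnx_uint64)i. Both forms make the conversion explicit and so avoid the same narrowing diagnostic; the difference is cosmetic.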
