Merge pull request #453 from Cytnx-dev/dev-master
Dev master to master
jeffry1829 authored Aug 30, 2024
2 parents ba1eb2f + 5d37d81 commit fa4596e
Showing 67 changed files with 2,874 additions and 615 deletions.
7 changes: 3 additions & 4 deletions Install.sh
@@ -3,8 +3,7 @@
 #=========================================================
 # [Note] Set the destination path for installation in Ins_dest
 #----------------------------------------------
-# Ins_dest="/usr/local/cytnx"
-Ins_dest="~/Cytnx_lib"
+Ins_dest="/usr/local/cytnx"
 FLAG="${FLAG} -DCMAKE_INSTALL_PREFIX=${Ins_dest}"
 #-----------------------------------------------

@@ -19,7 +18,7 @@ FLAG="${FLAG} -DCMAKE_INSTALL_PREFIX=${Ins_dest}"
 # Please follow the guide from official mkl/oneMKL "post-installation" part
 # to source the proper setvars.sh and/or vars.sh
 #---------------------------
-FLAG="${FLAG} -DUSE_MKL=ON"
+FLAG="${FLAG} -DUSE_MKL=OFF"
 #---------------------------
 # 2-b) use OpenBLAS (DEFAULT = on (by setting DUSE_MKL=OFF above))
 # [Note] By default it will automatically find openblas installed
@@ -155,7 +154,7 @@ FLAG="${FLAG} -DUSE_OMP=OFF"
 #=========================================================
 # [Note] Whether to run cytnx tests (DEFAULT = OFF)
 #-----------------------------------
-DRUN_TESTS=ON
+DRUN_TESTS=OFF
 FLAG="${FLAG} -DRUN_TESTS=${DRUN_TESTS}"
 #-----------------------------------

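All of these options accumulate in ${FLAG}; a minimal sketch of how an Install.sh-style script hands them to CMake (the build-directory name and the final invocation are assumptions, not shown in this diff):

    mkdir -p build && cd build
    cmake ${FLAG} ..           # FLAG carries -DCMAKE_INSTALL_PREFIX, -DUSE_MKL, -DRUN_TESTS, ...
    make -j4 && make install   # installs under Ins_dest (/usr/local/cytnx after this change)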
1 change: 1 addition & 0 deletions bm_tests/CMakeLists.txt
@@ -13,6 +13,7 @@ add_executable(
 linalg/Directsum_bm.cpp
 linalg/Svd_bm.cpp
 linalg/Svd_truncate_bm.cpp
+linalg/Lanczos_bm.cpp
 
 )
 if(USE_CUDA)
138 changes: 138 additions & 0 deletions bm_tests/linalg/Lanczos_bm.cpp
@@ -0,0 +1,138 @@
#include <benchmark/benchmark.h>
#include <cytnx.hpp>
using namespace cytnx;

namespace BMTest_Lanczos {
  UniTensor CreateOneSiteEffHam(const int d, const int D, const unsigned int dtype = Type.Double,
                                const int device = Device.cpu);
UniTensor CreateA(const int d, const int D, const unsigned int dtype = Type.Double,
const int device = Device.cpu);
class OneSiteOp : public LinOp {
public:
OneSiteOp(const int d, const int D, const unsigned int dtype = Type.Double,
const int& device = Device.cpu)
: LinOp("mv", D * D, dtype, device) {
EffH = CreateOneSiteEffHam(d, D, dtype, device);
}
UniTensor EffH;

/*
* |-|--"vil" "pi" "vir"--|-| |-|--"vil" "pi" "vir"--|-|
* | | + | | "po" | | + | |
* |L|- -------O----------|R| dot | = |L|- -------O----------|R|
* | | + | | "vol"--A--"vor" | | + | |
* |_|--"vol" "po" "vor"--|_| |_|---------A----------|_|
*
* Then relabels ["vil", "pi", "vir"] -> ["vol", "po", "vor"]
*
* "vil":virtual in bond left
* "po":physical out bond
*/
UniTensor matvec(const UniTensor& A) override {
auto tmp = Contract(EffH, A);
tmp.permute_({"vil", "pi", "vir"}, 1);
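      // Restore A's labels so the output has the same index structure as the
      // input; Lanczos repeatedly feeds matvec's output back in as the next A.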
tmp.relabels_(A.labels());
return tmp;
}
};

  // describe: test unsupported UniTensor types

/*
* -1
* |
* 0--A--2
*/
UniTensor CreateA(const int d, const int D, const unsigned int dtype, const int device) {
double low = -1.0, high = 1.0;
    UniTensor A = UniTensor({Bond(D), Bond(d), Bond(D)}, {}, -1, dtype, device)
                    .set_name("A")
                    .relabels_({"vol", "po", "vor"})
                    .set_rowrank_(1);
if (Type.is_float(A.dtype())) {
random::uniform_(A, low, high, 0);
}
return A;
}

/*
* |-|--"vil" "pi" "vir"--|-|
* | | + | |
* |L|- -------O----------|R|
* | | + | |
* |_|--"vol" "po" "vor"--|_|
*/
UniTensor CreateOneSiteEffHam(const int d, const int D, const unsigned int dtype,
const int device) {
double low = -1.0, high = 1.0;
std::vector<Bond> bonds = {Bond(D), Bond(d), Bond(D), Bond(D), Bond(d), Bond(D)};
std::vector<std::string> heff_labels = {"vil", "pi", "vir", "vol", "po", "vor"};
    UniTensor HEff = UniTensor(bonds, {}, -1, dtype, device)
                       .set_name("HEff")
                       .relabels_(heff_labels)
                       .set_rowrank(bonds.size() / 2);
auto HEff_shape = HEff.shape();
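    // The first rowrank() bonds (D, d, D) span the row space, so the
    // flattened HEff below is a square (D*d*D) x (D*d*D) matrix.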
auto in_dim = 1;
for (int i = 0; i < HEff.rowrank(); ++i) {
in_dim *= HEff_shape[i];
}
auto out_dim = in_dim;
if (Type.is_float(HEff.dtype())) {
random::uniform_(HEff, low, high, 0);
}
auto HEff_mat = HEff.get_block();
HEff_mat.reshape_({in_dim, out_dim});
    HEff_mat = HEff_mat + HEff_mat.permute({1, 0});  // symmetrize

    // Rebuild HEff with eigenvalues scaled by 0.01 so that its matrix
    // exponential (ExpM / Lanczos_Exp) converges.
auto eigs = HEff_mat.Eigh();
auto e = UniTensor(eigs[0], true) * 0.01;
e.set_labels({"a", "b"});
auto v = UniTensor(eigs[1]);
v.set_labels({"i", "a"});
auto vt = UniTensor(linalg::InvM(v.get_block()));
vt.set_labels({"b", "j"});
HEff_mat = Contract(Contract(e, v), vt).get_block();

    // HEff_mat = linalg::Matmul(HEff_mat, HEff_mat.permute({1, 0}).Conj()); // positive definite
HEff_mat.reshape_(HEff_shape);
HEff.put_block(HEff_mat);
return HEff;
}

static void BM_Lanczos_Gnd_F64(benchmark::State& state) {
// prepare data
int d = 2;
auto D = state.range(0);
auto op = OneSiteOp(d, D);
auto Tin = CreateA(d, D);
const double crit = 1.0e+8;
const int maxiter = 2;
bool is_V = true;
int k = 1;
bool is_row = false;
int max_krydim = 0;
// start test here
for (auto _ : state) {
auto x = linalg::Lanczos(&op, Tin, "Gnd", crit, maxiter, k, is_V, is_row, max_krydim);
}
}
BENCHMARK(BM_Lanczos_Gnd_F64)->Args({10})->Args({30})->Unit(benchmark::kMillisecond);
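  // Args({10}) and Args({30}) sweep the virtual bond dimension D = state.range(0);
  // the Lanczos_Exp benchmark below registers with the same pattern.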

static void BM_Lanczos_Exp_F64(benchmark::State& state) {
// prepare data
int d = 2;
auto D = state.range(0);
auto op = OneSiteOp(d, D);
auto Tin = CreateA(d, D);
const double crit = 1.0e+8;
double tau = 0.1;
const int maxiter = 2;
// start test here
for (auto _ : state) {
auto x = linalg::Lanczos_Exp(&op, Tin, tau, crit, maxiter);
}
}
BENCHMARK(BM_Lanczos_Exp_F64)->Args({10})->Args({30})->Unit(benchmark::kMillisecond);

} // namespace BMTest_Lanczos
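The two kernels register with Google Benchmark via the BENCHMARK(...)->Args(...) calls above. A sketch of invoking them, assuming the bm_tests target builds an executable named bm_tests (the actual binary name is not shown in this diff):

    ./bm_tests --benchmark_filter=BM_Lanczos_Gnd_F64   # ground-state solver only
    ./bm_tests --benchmark_filter='BM_Lanczos.*'       # both Lanczos benchmarks, D = 10 and 30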
14 changes: 1 addition & 13 deletions cmake/Modules/FindCUQUANTUM.cmake
@@ -27,19 +27,7 @@ else()
 endif()
 
 message(STATUS " cudaver: ${CUDAToolkit_VERSION_MAJOR}" )
-if((${CUDAToolkit_VERSION_MAJOR} LESS_EQUAL 10))
-  message(FATAL_ERROR "cuquantum requires CUDA ver.11+")
-elseif((${CUDAToolkit_VERSION_MAJOR} GREATER_EQUAL 11) AND (${CUDAToolkit_VERSION_MAJOR} LESS 12) AND (${CUDAToolkit_VERSION_MINOR} LESS_EQUAL 0))
-  set(CUTNLIB_DIR "lib/11.0")
-elseif((${CUDAToolkit_VERSION_MAJOR} GREATER_EQUAL 11) AND (${CUDAToolkit_VERSION_MAJOR} LESS 12) AND (${CUDAToolkit_VERSION_MINOR} GREATER_EQUAL 1))
-  set(CUTNLIB_DIR "lib/11")
-elseif((${CUDAToolkit_VERSION_MAJOR} GREATER_EQUAL 12))
-  if(EXISTS "${CUQUANTUM_ROOT}/lib/12")
-    set(CUTNLIB_DIR "lib/12")
-  else()
-    set(CUTNLIB_DIR "lib")
-  endif()
-endif()
+set(CUTNLIB_DIR "lib")
 
 set(CUQUANTUM_LIBRARY_DIRS ${CUQUANTUM_ROOT}/${CUTNLIB_DIR})
 set(CUQUANTUM_INCLUDE_DIRS ${CUQUANTUM_ROOT}/include)
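With the version-probing branches removed, the module always resolves a flat layout under ${CUQUANTUM_ROOT}; for example (the prefix /opt/cuquantum is an illustrative assumption):

    CUQUANTUM_LIBRARY_DIRS = /opt/cuquantum/lib
    CUQUANTUM_INCLUDE_DIRS = /opt/cuquantum/include

This matches cuQuantum archives that ship a single lib/ directory rather than the per-CUDA-version lib/11.0, lib/11, and lib/12 subdirectories the deleted branches probed for.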
10 changes: 10 additions & 0 deletions cytnx/UniTensor_conti.py
@@ -211,6 +211,16 @@ def relabel_(self, idx:int, new_label:str):
 
 
 
+@add_ovld_method(UniTensor)
+def relabel_(self, old_labels:List[str],new_labels:List[str]):
+    self.c_relabel_(old_labels,new_labels);
+    return self
+
+@add_ovld_method(UniTensor)
+def relabel_(self, new_labels:List[str]):
+    self.c_relabel_(new_labels);
+    return self
+
 @add_ovld_method(UniTensor)
 def relabels_(self, old_labels:List[str],new_labels:List[str]):
     self.c_relabels_(old_labels,new_labels);
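The new relabel_ overloads mirror the existing relabels_ pair. A minimal usage sketch, assuming the Python UniTensor constructor takes bonds and labels the same way as the C++ one shown earlier (the bonds and labels here are illustrative):

    from cytnx import UniTensor, Bond

    T = UniTensor([Bond(2), Bond(3)], labels=["a", "b"])
    T.relabel_(["x", "y"])       # replace all labels at once
    T.relabel_(["x"], ["left"])  # map old labels -> new labels
    # both overloads work in place and return self, so calls can be chained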