diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml index 55d563f..96acfe1 100644 --- a/.github/workflows/publish_pypi.yml +++ b/.github/workflows/publish_pypi.yml @@ -54,8 +54,7 @@ jobs: bash ci-utils/install_prereq_linux.sh && mkdir -p /tmp/argolid_bld && cp -r local_install /tmp/argolid_bld - CIBW_BEFORE_ALL_LINUX: yum -y install wget && - wget https://www.nasm.us/pub/nasm/releasebuilds/2.15.05/nasm-2.15.05.tar.bz2 && + CIBW_BEFORE_ALL_LINUX: curl -L https://www.nasm.us/pub/nasm/releasebuilds/2.15.05/nasm-2.15.05.tar.bz2 -o nasm-2.15.05.tar.bz2 && tar -xjf nasm-2.15.05.tar.bz2 && cd nasm-2.15.05 && ./configure && @@ -73,7 +72,10 @@ jobs: CIBW_ENVIRONMENT_WINDOWS: PATH="$TEMP\\argolid\\bin;$PATH" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="C:\\TEMP\\argolid_bld\\local_install" CMAKE_ARGS="-DCMAKE_GENERATOR=Ninja" CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: "delvewheel repair -w {dest_dir} {wheel}" CIBW_ARCHS: ${{ matrix.cibw_archs }} - CIBW_BEFORE_TEST_LINUX: yum -y install maven java + CIBW_BEFORE_TEST_LINUX: sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo && + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo && + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo && + yum -y install maven java CIBW_TEST_REQUIRES: bfio tensorstore numpy==1.24.0 CIBW_TEST_COMMAND: python -W default -m unittest discover -s {project}/tests -v diff --git a/.github/workflows/wheel_build.yml b/.github/workflows/wheel_build.yml index 88d3398..c11d7d1 100644 --- a/.github/workflows/wheel_build.yml +++ b/.github/workflows/wheel_build.yml @@ -52,8 +52,7 @@ jobs: bash ci-utils/install_prereq_linux.sh && mkdir -p /tmp/argolid_bld && cp -r local_install /tmp/argolid_bld - CIBW_BEFORE_ALL_LINUX: yum -y install wget && - wget https://www.nasm.us/pub/nasm/releasebuilds/2.15.05/nasm-2.15.05.tar.bz2 && + CIBW_BEFORE_ALL_LINUX: curl -L https://www.nasm.us/pub/nasm/releasebuilds/2.15.05/nasm-2.15.05.tar.bz2 -o 
nasm-2.15.05.tar.bz2 && tar -xjf nasm-2.15.05.tar.bz2 && cd nasm-2.15.05 && ./configure && @@ -71,7 +70,10 @@ jobs: CIBW_ENVIRONMENT_WINDOWS: PATH="$TEMP\\argolid\\bin;$PATH" ON_GITHUB="TRUE" ARGOLID_DEP_DIR="C:\\TEMP\\argolid_bld\\local_install" CMAKE_ARGS="-DCMAKE_GENERATOR=Ninja" CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: "delvewheel repair -w {dest_dir} {wheel}" CIBW_ARCHS: ${{ matrix.cibw_archs }} - CIBW_BEFORE_TEST_LINUX: yum -y install maven java + CIBW_BEFORE_TEST_LINUX: sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo && + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo && + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo && + yum -y install maven java CIBW_TEST_REQUIRES: bfio tensorstore numpy==1.24.0 CIBW_TEST_COMMAND: python -W default -m unittest discover -s {project}/tests -v diff --git a/CMakeLists.txt b/CMakeLists.txt index ad5f41c..c3ead3c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -52,7 +52,9 @@ find_package(filepattern REQUIRED) find_package(Threads QUIET) if (Threads_FOUND) if (CMAKE_USE_PTHREADS_INIT) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread") + if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread") + endif() endif (CMAKE_USE_PTHREADS_INIT) list(APPEND Build_LIBRARIES ${CMAKE_THREAD_LIBS_INIT}) else () diff --git a/setup.py b/setup.py index cef5f4d..928f28b 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,6 @@ import os import re import sys -import sysconfig import versioneer import platform import subprocess @@ -102,6 +101,7 @@ def build_extension(self, ext): package_dir={"": "src/python"}, ext_modules=[CMakeExtension("argolid/libargolid")], test_suite="tests", + install_requires=["pydantic"], zip_safe=False, python_requires=">=3.8", ) diff --git a/src/chunked_base_to_pyr_gen.cpp b/src/chunked_base_to_pyr_gen.cpp index df465c0..b0a3ce7 100644 --- a/src/chunked_base_to_pyr_gen.cpp +++ b/src/chunked_base_to_pyr_gen.cpp @@ -40,7 +40,7 @@ 
void ChunkedBaseToPyramid::CreatePyramidImages( const std::string& input_chunked int base_level_key, int min_dim, VisType v, - std::unordered_map& channel_ds_config, + const std::unordered_map& channel_ds_config, BS::thread_pool& th_pool) { int resolution = 1; // this gets doubled in each level up @@ -121,7 +121,7 @@ template void ChunkedBaseToPyramid::WriteDownsampledImage( const std::string& input_file, const std::string& input_scale_key, const std::string& output_file, const std::string& output_scale_key, int resolution, VisType v, - std::unordered_map& channel_ds_config, + const std::unordered_map& channel_ds_config, BS::thread_pool& th_pool) { auto [x_dim, y_dim, c_dim, num_dims] = GetZarrParams(v); diff --git a/src/chunked_base_to_pyr_gen.h b/src/chunked_base_to_pyr_gen.h index 6927181..97d0d7b 100644 --- a/src/chunked_base_to_pyr_gen.h +++ b/src/chunked_base_to_pyr_gen.h @@ -13,7 +13,7 @@ class ChunkedBaseToPyramid{ int base_scale_key, int min_dim, VisType v, - std::unordered_map& channel_ds_config, + const std::unordered_map& channel_ds_config, BS::thread_pool& th_pool); private: @@ -21,7 +21,7 @@ class ChunkedBaseToPyramid{ void WriteDownsampledImage( const std::string& input_file, const std::string& input_scale_key, const std::string& output_file, const std::string& output_scale_key, int resolution, VisType v, - std::unordered_map& channel_ds_config, + const std::unordered_map& channel_ds_config, BS::thread_pool& th_pool); }; } // ns argolid diff --git a/src/pyramid_view.cpp b/src/pyramid_view.cpp index 6447388..eea14e0 100644 --- a/src/pyramid_view.cpp +++ b/src/pyramid_view.cpp @@ -25,7 +25,6 @@ #include "tensorstore/kvstore/kvstore.h" #include "tensorstore/open.h" #include "filepattern/filepattern.h" -#include #include "pyramid_view.h" #include "chunked_base_to_pyr_gen.h" @@ -33,8 +32,6 @@ #include "pugixml.hpp" #include #include "plog/Initializers/RollingFileInitializer.h" -#include -using json = nlohmann::json; namespace fs = std::filesystem; 
using::tensorstore::Context; @@ -42,7 +39,7 @@ using::tensorstore::internal_zarr::ChooseBaseDType; namespace argolid { - void PyramidView::AssembleBaseLevel(VisType v) { + void PyramidView::AssembleBaseLevel(VisType v, const image_map& coordinate_map, const std::string& zarr_array_path) { if (v!=VisType::NG_Zarr && v!=VisType::Viv) { PLOG_INFO << "Unsupported Pyramid type requested"; return; @@ -51,14 +48,14 @@ namespace argolid { int grid_x_max = 0, grid_y_max = 0, grid_c_max = 0; int img_count = 0; - for (const auto & [name, location]: base_image_map) { + for (const auto & [name, location]: coordinate_map) { const auto[gx, gy, gc] = location; gc > grid_c_max ? grid_c_max = gc : grid_c_max = grid_c_max; gx > grid_x_max ? grid_x_max = gx : grid_x_max = grid_x_max; gy > grid_y_max ? grid_y_max = gy : grid_y_max = grid_y_max; ++img_count; } - PLOG_INFO << "Total images found: " << img_count << std::endl; + PLOG_DEBUG << "Total images found: " << img_count << std::endl; auto t1 = std::chrono::high_resolution_clock::now(); auto [x_dim, y_dim, c_dim, num_dims] = GetZarrParams(v); @@ -66,15 +63,15 @@ namespace argolid { if (img_count != 0) { size_t write_failed_count = 0; - const auto & sample_tiff_file = image_coll_path + "/" + base_image_map.begin() -> first; + const auto & sample_tiff_file = image_coll_path + "/" + coordinate_map.begin() -> first; TENSORSTORE_CHECK_OK_AND_ASSIGN(auto test_source, tensorstore::Open( GetOmeTiffSpecToRead(sample_tiff_file), tensorstore::OpenMode::open, tensorstore::ReadWriteMode::read).result()); auto test_image_shape = test_source.domain().shape(); - whole_image._chunk_size_x = test_image_shape[4]; - whole_image._chunk_size_y = test_image_shape[3]; + whole_image._chunk_size_x = test_image_shape[4] + 2*x_spacing; + whole_image._chunk_size_y = test_image_shape[3] + 2*y_spacing; whole_image._full_image_width = (grid_x_max + 1) * whole_image._chunk_size_x; whole_image._full_image_height = (grid_y_max + 1) * whole_image._chunk_size_y; 
whole_image._num_channels = grid_c_max + 1; @@ -88,8 +85,8 @@ namespace argolid { whole_image._data_type = test_source.dtype().name(); new_image_shape[c_dim] = whole_image._num_channels; - auto output_spec = [&test_source, &new_image_shape, &chunk_shape, this]() { - return GetZarrSpecToWrite(base_zarr_path, new_image_shape, chunk_shape, ChooseBaseDType(test_source.dtype()).value().encoded_dtype); + auto output_spec = [&test_source, &new_image_shape, &chunk_shape, &zarr_array_path, this]() { + return GetZarrSpecToWrite(zarr_array_path, new_image_shape, chunk_shape, ChooseBaseDType(test_source.dtype()).value().encoded_dtype); }(); TENSORSTORE_CHECK_OK_AND_ASSIGN(auto dest, tensorstore::Open( @@ -99,14 +96,14 @@ namespace argolid { tensorstore::ReadWriteMode::write).result()); auto t4 = std::chrono::high_resolution_clock::now(); - for (const auto & [file_name, location]: base_image_map) { + for (const auto & [file_name, location]: coordinate_map) { th_pool.push_task([ &dest, file_name=file_name, location=location, x_dim=x_dim, y_dim=y_dim, c_dim=c_dim, v, &whole_image, this]() { TENSORSTORE_CHECK_OK_AND_ASSIGN(auto source, tensorstore::Open( GetOmeTiffSpecToRead(image_coll_path + "/" + file_name), tensorstore::OpenMode::open, tensorstore::ReadWriteMode::read).result()); - PLOG_INFO << "Opening " << file_name; + PLOG_DEBUG << "Opening " << file_name; auto image_shape = source.domain().shape(); auto image_width = image_shape[4]; auto image_height = image_shape[3]; @@ -127,12 +124,12 @@ namespace argolid { tensorstore::IndexTransform < > transform = tensorstore::IdentityTransform(dest.domain()); if (v == VisType::NG_Zarr) { transform = (std::move(transform) | tensorstore::Dims(c_dim).SizedInterval(c_grid, 1) | - tensorstore::Dims(y_dim).SizedInterval(y_grid * whole_image._chunk_size_y, image_height) | - tensorstore::Dims(x_dim).SizedInterval(x_grid * whole_image._chunk_size_x, image_width)).value(); + tensorstore::Dims(y_dim).SizedInterval(y_grid * 
whole_image._chunk_size_y + y_spacing, image_height) | + tensorstore::Dims(x_dim).SizedInterval(x_grid * whole_image._chunk_size_x + x_spacing, image_width)).value(); } else if (v == VisType::Viv) { transform = (std::move(transform) | tensorstore::Dims(c_dim).SizedInterval(c_grid, 1) | - tensorstore::Dims(y_dim).SizedInterval(y_grid * whole_image._chunk_size_y, image_height) | - tensorstore::Dims(x_dim).SizedInterval(x_grid * whole_image._chunk_size_x, image_width)).value(); + tensorstore::Dims(y_dim).SizedInterval(y_grid * whole_image._chunk_size_y + y_spacing, image_height) | + tensorstore::Dims(x_dim).SizedInterval(x_grid * whole_image._chunk_size_x + x_spacing, image_width)).value(); } tensorstore::Write(array, dest | transform).value(); }); @@ -143,154 +140,30 @@ namespace argolid { base_image = whole_image; } - void PyramidView::ReAssembleBaseLevelWithNewMap(VisType v, const image_map& m, const std::string& output_path) { - - if (v!=VisType::NG_Zarr && v!=VisType::Viv) { - PLOG_INFO << "Unsupported Pyramid type requested"; - return; - } - - auto [x_dim, y_dim, c_dim, num_dims] = GetZarrParams(v); - - auto input_spec = [this]() { - return GetZarrSpecToRead(base_zarr_path); - }(); - - TENSORSTORE_CHECK_OK_AND_ASSIGN(auto base_store, tensorstore::Open( - input_spec, - tensorstore::OpenMode::open, - tensorstore::ReadWriteMode::read).result()); - auto base_image_shape = base_store.domain().shape(); - auto read_chunk_shape = base_store.chunk_layout().value().read_chunk_shape(); - - std::vector < std::int64_t > new_image_shape(num_dims, 1); - std::vector < std::int64_t > chunk_shape(num_dims, 1); - - new_image_shape[y_dim] = base_image_shape[y_dim]; - new_image_shape[x_dim] = base_image_shape[x_dim]; - - chunk_shape[y_dim] = read_chunk_shape[y_dim]; - chunk_shape[x_dim] = read_chunk_shape[x_dim]; - - auto open_mode = tensorstore::OpenMode::create; - open_mode = open_mode | tensorstore::OpenMode::delete_existing; - new_image_shape[c_dim] = base_image_shape[c_dim]; - 
- - auto output_spec = [v, &output_path, &new_image_shape, & chunk_shape, & base_store, this]() { - if (v == VisType::NG_Zarr) { - return GetZarrSpecToWrite(output_path + "/0", new_image_shape, chunk_shape, ChooseBaseDType(base_store.dtype()).value().encoded_dtype); - } else if (v == VisType::Viv) { - return GetZarrSpecToWrite(output_path + "/0", new_image_shape, chunk_shape, ChooseBaseDType(base_store.dtype()).value().encoded_dtype); - } - }(); - - TENSORSTORE_CHECK_OK_AND_ASSIGN(auto dest, tensorstore::Open( - output_spec, - open_mode, - tensorstore::ReadWriteMode::write).result()); - - size_t write_failed_count = 0; - - for (const auto & [file_name, location]: m) { - // find where to read data from - const auto base_location = [file_name=file_name, this]() -> std::optional < std::tuple < std::uint32_t, - uint32_t, uint32_t >> { - if (auto search = base_image_map.find(file_name); search != base_image_map.end()) { - return std::optional { - search -> second - }; - } else { - return std::nullopt; - } - }(); - - if (!base_location.has_value()) { - continue; - } - - th_pool.push_task([ &base_store, &dest, file_name=file_name, location=location, base_location, x_dim=x_dim, y_dim=y_dim, c_dim=c_dim, v, this]() { - - const auto & [x_grid_base, y_grid_base, c_grid_base] = base_location.value(); - - tensorstore::IndexTransform < > read_transform = tensorstore::IdentityTransform(base_store.domain()); - - if (v == VisType::NG_Zarr) { - read_transform = (std::move(read_transform) | tensorstore::Dims(c_dim).SizedInterval(c_grid_base, 1) | - tensorstore::Dims(y_dim).SizedInterval(y_grid_base * base_image._chunk_size_y, base_image._chunk_size_y) | - tensorstore::Dims(x_dim).SizedInterval(x_grid_base * base_image._chunk_size_x, base_image._chunk_size_x)).value(); - } else if (v == VisType::Viv) { - read_transform = (std::move(read_transform) | tensorstore::Dims(c_dim).SizedInterval(c_grid_base, 1) | - tensorstore::Dims(y_dim).SizedInterval(y_grid_base * base_image._chunk_size_y, 
base_image._chunk_size_y) | - tensorstore::Dims(x_dim).SizedInterval(x_grid_base * base_image._chunk_size_x, base_image._chunk_size_x)).value(); - } - - auto array = tensorstore::AllocateArray({ - base_image._chunk_size_y, - base_image._chunk_size_x - }, tensorstore::c_order, - tensorstore::value_init, base_store.dtype()); - - // initiate a read - tensorstore::Read(base_store | read_transform, array).value(); - - const auto & [x_grid, y_grid, c_grid] = location; - - tensorstore::IndexTransform < > write_transform = tensorstore::IdentityTransform(dest.domain()); - if (v == VisType::NG_Zarr) { - write_transform = (std::move(write_transform) | tensorstore::Dims(c_dim).SizedInterval(c_grid, 1) | - tensorstore::Dims(y_dim).SizedInterval(y_grid * base_image._chunk_size_y, base_image._chunk_size_y) | - tensorstore::Dims(x_dim).SizedInterval(x_grid * base_image._chunk_size_x, base_image._chunk_size_x)).value(); - } else if (v == VisType::Viv) { - write_transform = (std::move(write_transform) | tensorstore::Dims(c_dim).SizedInterval(c_grid, 1) | - tensorstore::Dims(y_dim).SizedInterval(y_grid * base_image._chunk_size_y, base_image._chunk_size_y) | - tensorstore::Dims(x_dim).SizedInterval(x_grid * base_image._chunk_size_x, base_image._chunk_size_x)).value(); - } - tensorstore::Write(array, dest | write_transform).value(); - }); - } - - th_pool.wait_for_tasks(); - } - - void PyramidView::GeneratePyramid(std::optional map, + void PyramidView::GeneratePyramid(const image_map& map, VisType v, int min_dim, - std::unordered_map& channel_ds_config) + const std::unordered_map& channel_ds_config) { + const auto image_dir = pyramid_zarr_path + "/" + image_name +".zarr"; + if (fs::exists(image_dir)) fs::remove_all(image_dir); + PLOG_INFO << "GeneratePyramid Start "; if (v!=VisType::NG_Zarr && v!=VisType::Viv) { PLOG_INFO << "Unsupported Pyramid type requested"; return; } - const auto output_zarr_path = [v, this](){ + + + const auto output_zarr_path = [v, &image_dir, this](){ if 
(v==VisType::Viv){ - return pyramid_zarr_path + "/" + image_name +".zarr/data.zarr/0"; + return image_dir +"/data.zarr/0"; } else { - return pyramid_zarr_path + "/" + image_name +".zarr/0"; + return image_dir +"/0"; } }(); - - if (map.has_value()){ - ReAssembleBaseLevelWithNewMap(v,map.value(),output_zarr_path); - } else { - // copy base level zarr file - fs::path destination{output_zarr_path+"/0"}; - if (!fs::exists(destination)) { - fs::create_directories(destination); - } - - // Iterate over files in the source directory - fs::path source{base_zarr_path}; - for (const auto& entry : fs::directory_iterator(source)) { - const auto& path = entry.path(); - auto destPath = destination / path.filename(); - - // Copy file - if (fs::is_regular_file(path)) { - fs::copy_file(path, destPath, fs::copy_options::overwrite_existing); - } - } - } + PLOG_INFO << "Starting to generate base layer "; + AssembleBaseLevel(v, map, output_zarr_path+"/0") ; + PLOG_INFO << "Finished generating base layer "; // generate pyramid ChunkedBaseToPyramid base_to_pyramid; @@ -298,10 +171,13 @@ namespace argolid { int max_level = static_cast(ceil(log2(std::max({base_image._full_image_width, base_image._full_image_width})))); int min_level = static_cast(ceil(log2(min_dim))); auto max_level_key = max_level-min_level+1; + PLOG_INFO << "Starting to generate pyramid "; base_to_pyramid.CreatePyramidImages(output_zarr_path, output_zarr_path, base_level_key, min_dim, v, channel_ds_config, th_pool); - + PLOG_INFO << "Finished generating pyramid "; + // generate metadata WriteMultiscaleMetadataForImageCollection(image_name, pyramid_zarr_path, base_level_key, max_level_key, v, base_image); + PLOG_INFO << "GeneratePyramid end "; } } // ns argolid \ No newline at end of file diff --git a/src/pyramid_view.h b/src/pyramid_view.h index 31e4950..6e79081 100644 --- a/src/pyramid_view.h +++ b/src/pyramid_view.h @@ -5,7 +5,6 @@ #include #include "utilities.h" #include "BS_thread_pool.hpp" - namespace argolid{ using 
image_map = std::unordered_map>; @@ -13,29 +12,27 @@ using image_map = std::unordered_map map, + void AssembleBaseLevel(VisType v, const image_map& map, const std::string& zarr_array_path); + void GeneratePyramid(const image_map& map, VisType v, int min_dim, - std::unordered_map& channel_ds_config); + const std::unordered_map& channel_ds_config); private: - std::string image_coll_path, base_zarr_path, pyramid_zarr_path, image_name, pyramid_root; - std::uint16_t max_level; - image_map base_image_map; + std::string image_coll_path, pyramid_zarr_path, image_name; + std::uint16_t x_spacing, y_spacing; BS::thread_pool th_pool; ImageInfo base_image; }; diff --git a/src/python/argolid/__init__.py b/src/python/argolid/__init__.py index 417befb..361ac8a 100644 --- a/src/python/argolid/__init__.py +++ b/src/python/argolid/__init__.py @@ -1,4 +1,4 @@ -from .pyramid_generator import PyramidGenerartor, PyramidView +from .pyramid_generator import PyramidGenerartor, PyramidView, PlateVisualizationMetadata, Downsample from . 
import _version __version__ = _version.get_versions()['version'] diff --git a/src/python/argolid/pyramid_generator.py b/src/python/argolid/pyramid_generator.py index cc44b89..cd52e7f 100644 --- a/src/python/argolid/pyramid_generator.py +++ b/src/python/argolid/pyramid_generator.py @@ -1,5 +1,36 @@ +from pydantic import BaseModel, Field, field_validator +from typing import Dict, Optional, List from .libargolid import OmeTiffToChunkedPyramidCPP, VisType, DSType, PyramidViewCPP +class Downsample(BaseModel): + channel_name: str + method: str + + @field_validator('method', mode='before') + def check_method_config(cls, v): + if v not in {"mean", "mode_max", "mode_min"}: + raise ValueError('Value must be "mean", "mode_max" or "mode_min".') + return v + +class PlateVisualizationMetadata(BaseModel): + output_type: str + minimum_dimension: int + x_spacing: int + y_spacing: int + channel_downsample_config: Optional[List[Downsample]] = None + + @field_validator('minimum_dimension', 'x_spacing', 'y_spacing', mode='before') + def check_non_negative(cls, v): + if v < 0: + raise ValueError('value must be non-negative') + return v + + @field_validator('output_type') + def check_output_type_config(cls, v): + if v not in {"Viv", "NG_Zarr"}: + raise ValueError('Value must be "NG_Zarr" or "Viv".') + return v + class PyramidGenerartor: def __init__(self, log_level = None) -> None: self._pyr_generator = OmeTiffToChunkedPyramidCPP() @@ -23,21 +54,27 @@ def set_log_level(self, level): self._pyr_generator.SetLogLevel(level) class PyramidView: - def __init__(self, image_path, pyramid_zarr_loc, output_image_name, image_map, vis_type, log_level = None) -> None: - base_zarr_loc = pyramid_zarr_loc + "/base_zarr_loc" - self._pyr_view = PyramidViewCPP(image_path, base_zarr_loc, pyramid_zarr_loc, output_image_name, image_map) + def __init__(self, image_path, pyramid_zarr_loc, output_image_name, metadata_dict:PlateVisualizationMetadata, log_level = None) -> None: + x_border = (lambda d: d.x_spacing if
hasattr(d,'x_spacing') and d.x_spacing is not None else 0)(metadata_dict) + y_border = (lambda d: d.y_spacing if hasattr(d,'y_spacing') and d.y_spacing is not None else 0)(metadata_dict) + self._pyr_view = PyramidViewCPP(image_path, pyramid_zarr_loc, output_image_name, x_border, y_border) self.vis_types_dict ={ "NG_Zarr" : VisType.NG_Zarr, "Viv" : VisType.Viv} self.ds_types_dict = {"mean" : DSType.Mean, "mode_max" : DSType.Mode_Max, "mode_min" : DSType.Mode_Min} - self._pyr_view.AssembleBaseLevel(self.vis_types_dict[vis_type]) + + if hasattr(metadata_dict,'minimum_dimension') and metadata_dict.minimum_dimension is not None: + self._min_dim = metadata_dict.minimum_dimension + else: + self._min_dim = 512 - def generate_pyramid(self, min_dim, vis_type, ds_dict = {}): - channel_ds_dict = {} - for c in ds_dict: - channel_ds_dict[c] = self.ds_types_dict[ds_dict[c]] - self._pyr_view.GeneratePyramid(None, self.vis_types_dict[vis_type], min_dim, channel_ds_dict ) + if hasattr(metadata_dict,'output_type') and metadata_dict.output_type is not None: + self._vis_type = self.vis_types_dict[metadata_dict.output_type] + else: + self._vis_type = VisType.Viv - def regenerate_pyramid(self, image_map, min_dim, vis_type, ds_dict = {}): - channel_ds_dict = {} - for c in ds_dict: - channel_ds_dict[c] = self.ds_types_dict[ds_dict[c]] - self._pyr_view.GeneratePyramid(image_map, self.vis_types_dict[vis_type], min_dim, channel_ds_dict ) \ No newline at end of file + self._channel_downsample_config = {} + if hasattr(metadata_dict,'channel_downsample_config') and metadata_dict.channel_downsample_config is not None: + for c in metadata_dict.channel_downsample_config: + self._channel_downsample_config[c.channel_name] = self.ds_types_dict[c.method] + + def generate_pyramid(self, image_map): + self._pyr_view.GeneratePyramid(image_map, self._vis_type, self._min_dim, self._channel_downsample_config) diff --git a/src/python/pyramid_python_interface.cpp
b/src/python/pyramid_python_interface.cpp index 8e5e34e..c350e82 100644 --- a/src/python/pyramid_python_interface.cpp +++ b/src/python/pyramid_python_interface.cpp @@ -13,7 +13,7 @@ PYBIND11_MODULE(libargolid, m) { .def("SetLogLevel", &argolid::OmeTiffToChunkedPyramid::SetLogLevel) ; py::class_>(m, "PyramidViewCPP") \ - .def(py::init()) \ + .def(py::init()) \ .def("GeneratePyramid", &argolid::PyramidView::GeneratePyramid) \ .def("AssembleBaseLevel", &argolid::PyramidView::AssembleBaseLevel) ; diff --git a/src/utilities.cpp b/src/utilities.cpp index c278ade..45dd9cd 100644 --- a/src/utilities.cpp +++ b/src/utilities.cpp @@ -8,6 +8,7 @@ #include "pugixml.hpp" #include #include "utilities.h" +#include using json = nlohmann::json; namespace fs = std::filesystem; @@ -21,8 +22,8 @@ tensorstore::Spec GetOmeTiffSpecToRead(const std::string& filename){ }, {"context", { {"cache_pool", {{"total_bytes_limit", 1000000000}}}, - {"data_copy_concurrency", {{"limit", 8}}}, - {"file_io_concurrency", {{"limit", 8}}}, + {"data_copy_concurrency", {{"limit", std::thread::hardware_concurrency()}}}, + {"file_io_concurrency", {{"limit", std::thread::hardware_concurrency()}}}, }}, }).value(); } @@ -37,8 +38,8 @@ tensorstore::Spec GetZarrSpecToWrite( const std::string& filename, }, {"context", { {"cache_pool", {{"total_bytes_limit", 1000000000}}}, - {"data_copy_concurrency", {{"limit", 8}}}, - {"file_io_concurrency", {{"limit", 8}}}, + {"data_copy_concurrency", {{"limit", std::thread::hardware_concurrency()}}}, + {"file_io_concurrency", {{"limit", std::thread::hardware_concurrency()}}}, }}, {"metadata", { {"zarr_format", 2}, @@ -83,8 +84,8 @@ tensorstore::Spec GetNPCSpecToWrite(const std::string& filename, }, {"context", { {"cache_pool", {{"total_bytes_limit", 1000000000}}}, - {"data_copy_concurrency", {{"limit", 8}}}, - {"file_io_concurrency", {{"limit", 8}}}, + {"data_copy_concurrency", {{"limit", std::thread::hardware_concurrency()}}}, + {"file_io_concurrency", {{"limit", 
std::thread::hardware_concurrency()}}}, }}, {"multiscale_metadata", { {"data_type", dtype}, @@ -106,8 +107,8 @@ tensorstore::Spec GetNPCSpecToWrite(const std::string& filename, }, {"context", { {"cache_pool", {{"total_bytes_limit", 1000000000}}}, - {"data_copy_concurrency", {{"limit", 8}}}, - {"file_io_concurrency", {{"limit", 8}}}, + {"data_copy_concurrency", {{"limit", std::thread::hardware_concurrency()}}}, + {"file_io_concurrency", {{"limit", std::thread::hardware_concurrency()}}}, }}, {"scale_metadata", { {"encoding", "raw"},