[refactor] Fix "const CompileConfig *" to "const CompileConfig &" (ta…
Browse files Browse the repository at this point in the history
…ichi-dev#7243)

Issue: taichi-dev#7002,
taichi-dev#7159 (comment)

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
2 people authored and quadpixels committed May 13, 2023
1 parent 1ea2f18 commit db4267d
Showing 35 changed files with 160 additions and 167 deletions.
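The pattern repeated across all 35 files is condensed below, abridged from the first hunk of taichi/analysis/offline_cache_util.cpp (the elided serializer calls all change the same way): the parameter goes from const CompileConfig * to const CompileConfig &, the TI_ASSERT null check is dropped, -> becomes ., and call sites that previously took an address or dereferenced a pointer now pass the object straight through.

// Before: pointer parameter; the body needs a runtime null check and
// accesses members through '->'.
static std::vector<std::uint8_t> get_offline_cache_key_of_compile_config(
    const CompileConfig *config) {
  TI_ASSERT(config);
  BinaryOutputSerializer serializer;
  serializer.initialize();
  serializer(config->arch);
  // ... remaining fields serialized the same way ...
}

// After: const-reference parameter; the null check disappears because a
// reference cannot be null, and members are accessed with '.'.
static std::vector<std::uint8_t> get_offline_cache_key_of_compile_config(
    const CompileConfig &config) {
  BinaryOutputSerializer serializer;
  serializer.initialize();
  serializer(config.arch);
  // ... remaining fields serialized the same way ...
}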
97 changes: 46 additions & 51 deletions taichi/analysis/offline_cache_util.cpp
@@ -14,55 +14,54 @@
namespace taichi::lang {

static std::vector<std::uint8_t> get_offline_cache_key_of_compile_config(
const CompileConfig *config) {
TI_ASSERT(config);
const CompileConfig &config) {
BinaryOutputSerializer serializer;
serializer.initialize();
serializer(config->arch);
serializer(config->debug);
serializer(config->cfg_optimization);
serializer(config->check_out_of_bound);
serializer(config->opt_level);
serializer(config->external_optimization_level);
serializer(config->move_loop_invariant_outside_if);
serializer(config->demote_dense_struct_fors);
serializer(config->advanced_optimization);
serializer(config->constant_folding);
serializer(config->kernel_profiler);
serializer(config->fast_math);
serializer(config->flatten_if);
serializer(config->make_thread_local);
serializer(config->make_block_local);
serializer(config->detect_read_only);
serializer(config->default_fp->to_string());
serializer(config->default_ip.to_string());
if (arch_is_cpu(config->arch)) {
serializer(config->default_cpu_block_dim);
serializer(config->cpu_max_num_threads);
} else if (arch_is_gpu(config->arch)) {
serializer(config->default_gpu_block_dim);
serializer(config->gpu_max_reg);
serializer(config->saturating_grid_dim);
serializer(config->cpu_max_num_threads);
serializer(config.arch);
serializer(config.debug);
serializer(config.cfg_optimization);
serializer(config.check_out_of_bound);
serializer(config.opt_level);
serializer(config.external_optimization_level);
serializer(config.move_loop_invariant_outside_if);
serializer(config.demote_dense_struct_fors);
serializer(config.advanced_optimization);
serializer(config.constant_folding);
serializer(config.kernel_profiler);
serializer(config.fast_math);
serializer(config.flatten_if);
serializer(config.make_thread_local);
serializer(config.make_block_local);
serializer(config.detect_read_only);
serializer(config.default_fp->to_string());
serializer(config.default_ip.to_string());
if (arch_is_cpu(config.arch)) {
serializer(config.default_cpu_block_dim);
serializer(config.cpu_max_num_threads);
} else if (arch_is_gpu(config.arch)) {
serializer(config.default_gpu_block_dim);
serializer(config.gpu_max_reg);
serializer(config.saturating_grid_dim);
serializer(config.cpu_max_num_threads);
}
serializer(config->ad_stack_size);
serializer(config->default_ad_stack_size);
serializer(config->random_seed);
if (config->arch == Arch::cc) {
serializer(config->cc_compile_cmd);
serializer(config->cc_link_cmd);
} else if (config->arch == Arch::opengl || config->arch == Arch::gles) {
serializer(config->allow_nv_shader_extension);
serializer(config.ad_stack_size);
serializer(config.default_ad_stack_size);
serializer(config.random_seed);
if (config.arch == Arch::cc) {
serializer(config.cc_compile_cmd);
serializer(config.cc_link_cmd);
} else if (config.arch == Arch::opengl || config.arch == Arch::gles) {
serializer(config.allow_nv_shader_extension);
}
serializer(config->make_mesh_block_local);
serializer(config->optimize_mesh_reordered_mapping);
serializer(config->mesh_localize_to_end_mapping);
serializer(config->mesh_localize_from_end_mapping);
serializer(config->mesh_localize_all_attr_mappings);
serializer(config->demote_no_access_mesh_fors);
serializer(config->experimental_auto_mesh_local);
serializer(config->auto_mesh_local_default_occupacy);
serializer(config->real_matrix_scalarize);
serializer(config.make_mesh_block_local);
serializer(config.optimize_mesh_reordered_mapping);
serializer(config.mesh_localize_to_end_mapping);
serializer(config.mesh_localize_from_end_mapping);
serializer(config.mesh_localize_all_attr_mappings);
serializer(config.demote_no_access_mesh_fors);
serializer(config.experimental_auto_mesh_local);
serializer(config.auto_mesh_local_default_occupacy);
serializer(config.real_matrix_scalarize);
serializer.finalize();

return serializer.data;
@@ -141,7 +140,7 @@ std::string get_hashed_offline_cache_key_of_snode(SNode *snode) {
return picosha2::get_hash_hex_string(hasher);
}

std::string get_hashed_offline_cache_key(const CompileConfig *config,
std::string get_hashed_offline_cache_key(const CompileConfig &config,
Kernel *kernel) {
std::string kernel_ast_string;
if (kernel) {
@@ -150,11 +149,7 @@ std::string get_hashed_offline_cache_key(const CompileConfig *config,
kernel_ast_string = oss.str();
}

std::vector<std::uint8_t> compile_config_key;
if (config) {
compile_config_key = get_offline_cache_key_of_compile_config(config);
}

auto compile_config_key = get_offline_cache_key_of_compile_config(config);
std::string autodiff_mode =
std::to_string(static_cast<std::size_t>(kernel->autodiff_mode));
picosha2::hash256_one_by_one hasher;
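One behavioral nuance in the second hunk above: with a pointer, the compile-config contribution to the hash was skipped whenever config was null, whereas the reference version always computes it, so every cache key now reflects the CompileConfig. Condensed before/after of that call site (abridged from the hunk above):

// Before: the config contribution to the hash was optional.
std::vector<std::uint8_t> compile_config_key;
if (config) {
  compile_config_key = get_offline_cache_key_of_compile_config(config);
}

// After: a reference cannot be null, so the key is always computed.
auto compile_config_key = get_offline_cache_key_of_compile_config(config);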
2 changes: 1 addition & 1 deletion taichi/analysis/offline_cache_util.h
@@ -13,7 +13,7 @@ class SNode;
class Kernel;

std::string get_hashed_offline_cache_key_of_snode(SNode *snode);
std::string get_hashed_offline_cache_key(const CompileConfig *config,
std::string get_hashed_offline_cache_key(const CompileConfig &config,
Kernel *kernel);
void gen_offline_cache_key(Program *prog, IRNode *ast, std::ostream *os);

8 changes: 4 additions & 4 deletions taichi/cache/gfx/cache_manager.cpp
@@ -159,13 +159,13 @@ CacheManager::CacheManager(Params &&init_params)
offline_cache_metadata_.version[2] = TI_VERSION_PATCH;
}

CompiledKernelData CacheManager::load_or_compile(const CompileConfig *config,
CompiledKernelData CacheManager::load_or_compile(const CompileConfig &config,
Kernel *kernel) {
if (kernel->is_evaluator) {
spirv::lower(*config, kernel);
spirv::lower(config, kernel);
return gfx::run_codegen(kernel, runtime_->get_ti_device()->arch(),
runtime_->get_ti_device()->get_caps(),
compiled_structs_, *config);
compiled_structs_, config);
}
std::string kernel_key = make_kernel_key(config, kernel);
if (mode_ > NotCache) {
@@ -293,7 +293,7 @@ CompiledKernelData CacheManager::compile_and_cache_kernel(
return *params_opt;
}

std::string CacheManager::make_kernel_key(const CompileConfig *config,
std::string CacheManager::make_kernel_key(const CompileConfig &config,
Kernel *kernel) const {
if (mode_ < MemAndDiskCache) {
return kernel->get_name();
4 changes: 2 additions & 2 deletions taichi/cache/gfx/cache_manager.h
@@ -37,7 +37,7 @@ class CacheManager {

explicit CacheManager(Params &&init_params);

CompiledKernelData load_or_compile(const CompileConfig *config,
CompiledKernelData load_or_compile(const CompileConfig &config,
Kernel *kernel);
void dump_with_merging() const;
void clean_offline_cache(offline_cache::CleanCachePolicy policy,
@@ -50,7 +50,7 @@
const std::string &key);
CompiledKernelData compile_and_cache_kernel(const std::string &key,
Kernel *kernel);
std::string make_kernel_key(const CompileConfig *config,
std::string make_kernel_key(const CompileConfig &config,
Kernel *kernel) const;

Mode mode_{MemCache};
10 changes: 5 additions & 5 deletions taichi/codegen/amdgpu/codegen_amdgpu.cpp
@@ -27,7 +27,7 @@ using namespace llvm;
class TaskCodeGenAMDGPU : public TaskCodeGenLLVM {
public:
using IRVisitor::visit;
TaskCodeGenAMDGPU(const CompileConfig *config,
TaskCodeGenAMDGPU(const CompileConfig &config,
Kernel *kernel,
IRNode *ir = nullptr)
: TaskCodeGenLLVM(config, kernel, ir) {
@@ -234,7 +234,7 @@ class TaskCodeGenAMDGPU : public TaskCodeGenLLVM {
init_offloaded_task_function(stmt, "gather_list");
call("gc_parallel_0", get_context(), snode_id);
finalize_offloaded_task_function();
current_task->grid_dim = compile_config->saturating_grid_dim;
current_task->grid_dim = compile_config.saturating_grid_dim;
current_task->block_dim = 64;
offloaded_tasks.push_back(*current_task);
current_task = nullptr;
@@ -252,7 +252,7 @@ class TaskCodeGenAMDGPU : public TaskCodeGenLLVM {
init_offloaded_task_function(stmt, "zero_fill");
call("gc_parallel_2", get_context(), snode_id);
finalize_offloaded_task_function();
current_task->grid_dim = compile_config->saturating_grid_dim;
current_task->grid_dim = compile_config.saturating_grid_dim;
current_task->block_dim = 64;
offloaded_tasks.push_back(*current_task);
current_task = nullptr;
@@ -398,7 +398,7 @@ class TaskCodeGenAMDGPU : public TaskCodeGenLLVM {
};

LLVMCompiledTask KernelCodeGenAMDGPU::compile_task(
const CompileConfig *config,
const CompileConfig &config,
std::unique_ptr<llvm::Module> &&module,
OffloadedStmt *stmt) {
TaskCodeGenAMDGPU gen(config, kernel, stmt);
@@ -407,7 +407,7 @@ LLVMCompiledTask KernelCodeGenAMDGPU::compile_task(

FunctionType KernelCodeGenAMDGPU::compile_to_function() {
auto *llvm_prog = get_llvm_program(prog);
const auto &config = *get_compile_config();
const auto &config = get_compile_config();
auto *tlctx = llvm_prog->get_llvm_context(config.arch);

AMDGPUModuleToFunctionConverter converter{tlctx,
4 changes: 2 additions & 2 deletions taichi/codegen/amdgpu/codegen_amdgpu.h
@@ -9,14 +9,14 @@ namespace lang {

class KernelCodeGenAMDGPU : public KernelCodeGen {
public:
KernelCodeGenAMDGPU(const CompileConfig *config, Kernel *kernel)
KernelCodeGenAMDGPU(const CompileConfig &config, Kernel *kernel)
: KernelCodeGen(config, kernel) {
}

// TODO: Stop defining this macro guards in the headers
#ifdef TI_WITH_LLVM
LLVMCompiledTask compile_task(
const CompileConfig *config,
const CompileConfig &config,
std::unique_ptr<llvm::Module> &&module = nullptr,
OffloadedStmt *stmt = nullptr) override;
#endif // TI_WITH_LLVM
23 changes: 11 additions & 12 deletions taichi/codegen/codegen.cpp
@@ -21,17 +21,17 @@

namespace taichi::lang {

KernelCodeGen::KernelCodeGen(const CompileConfig *compile_config,
KernelCodeGen::KernelCodeGen(const CompileConfig &compile_config,
Kernel *kernel)
: prog(kernel->program), kernel(kernel), compile_config_(compile_config) {
this->ir = kernel->ir.get();
}

std::unique_ptr<KernelCodeGen> KernelCodeGen::create(
const CompileConfig *compile_config,
const CompileConfig &compile_config,
Kernel *kernel) {
#ifdef TI_WITH_LLVM
const auto arch = compile_config->arch;
const auto arch = compile_config.arch;
if (arch_is_cpu(arch) && arch != Arch::wasm) {
return std::make_unique<KernelCodeGenCPU>(compile_config, kernel);
} else if (arch == Arch::wasm) {
@@ -61,15 +61,14 @@ std::optional<LLVMCompiledKernel>
KernelCodeGen::maybe_read_compilation_from_cache(
const std::string &kernel_key) {
TI_AUTO_PROF;
const auto &config = *compile_config_;
auto *llvm_prog = get_llvm_program(prog);
const auto &reader = llvm_prog->get_cache_reader();
if (!reader) {
return std::nullopt;
}

LlvmOfflineCache::KernelCacheData cache_data;
auto *tlctx = llvm_prog->get_llvm_context(config.arch);
auto *tlctx = llvm_prog->get_llvm_context(compile_config_.arch);
auto &llvm_ctx = *tlctx->get_this_thread_context();

if (!reader->get_kernel_cache(cache_data, kernel_key, llvm_ctx)) {
@@ -85,12 +84,12 @@ void KernelCodeGen::cache_kernel(const std::string &kernel_key,
}

LLVMCompiledKernel KernelCodeGen::compile_kernel_to_module() {
const auto &config = *compile_config_;
auto *llvm_prog = get_llvm_program(prog);
auto *tlctx = llvm_prog->get_llvm_context(config.arch);
std::string kernel_key = get_hashed_offline_cache_key(&config, kernel);
auto *tlctx = llvm_prog->get_llvm_context(compile_config_.arch);
std::string kernel_key =
get_hashed_offline_cache_key(compile_config_, kernel);
kernel->set_kernel_key_for_cache(kernel_key);
if (config.offline_cache && this->supports_offline_cache() &&
if (compile_config_.offline_cache && this->supports_offline_cache() &&
!kernel->is_evaluator) {
auto res = maybe_read_compilation_from_cache(kernel_key);
if (res) {
@@ -101,7 +100,7 @@ LLVMCompiledKernel KernelCodeGen::compile_kernel_to_module() {
}
}

irpass::ast_to_ir(config, *kernel, false);
irpass::ast_to_ir(compile_config_, *kernel, false);

auto block = dynamic_cast<Block *>(kernel->ir.get());
auto &worker = get_llvm_program(kernel->program)->compilation_workers;
@@ -114,8 +113,8 @@ LLVMCompiledKernel KernelCodeGen::compile_kernel_to_module() {
tlctx->fetch_this_thread_struct_module();
auto offload = irpass::analysis::clone(offloads[i].get());
irpass::re_id(offload.get());
auto new_data =
this->compile_task(&config, nullptr, offload->as<OffloadedStmt>());
auto new_data = this->compile_task(compile_config_, nullptr,
offload->as<OffloadedStmt>());
data[i] = std::make_unique<LLVMCompiledTask>(std::move(new_data));
};
if (kernel->is_evaluator) {
10 changes: 5 additions & 5 deletions taichi/codegen/codegen.h
@@ -42,12 +42,12 @@ class KernelCodeGen {
IRNode *ir;

public:
explicit KernelCodeGen(const CompileConfig *compile_config, Kernel *kernel);
explicit KernelCodeGen(const CompileConfig &compile_config, Kernel *kernel);

virtual ~KernelCodeGen() = default;

static std::unique_ptr<KernelCodeGen> create(
const CompileConfig *compile_config,
const CompileConfig &compile_config,
Kernel *kernel);

virtual FunctionType compile_to_function() = 0;
@@ -59,7 +59,7 @@
virtual LLVMCompiledKernel compile_kernel_to_module();

virtual LLVMCompiledTask compile_task(
const CompileConfig *config,
const CompileConfig &config,
std::unique_ptr<llvm::Module> &&module = nullptr,
OffloadedStmt *stmt = nullptr){TI_NOT_IMPLEMENTED}

@@ -69,12 +69,12 @@
const LLVMCompiledKernel &data);
#endif
protected:
const CompileConfig *get_compile_config() const {
const CompileConfig &get_compile_config() const {
return compile_config_;
}

private:
const CompileConfig *compile_config_{nullptr};
const CompileConfig &compile_config_;
};

#ifdef TI_WITH_LLVM
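The member change at the bottom of this file, const CompileConfig *compile_config_{nullptr} becoming const CompileConfig &compile_config_, is the part with real design weight: a reference member must be bound in every constructor's initializer list, cannot be reseated afterwards, and causes the implicitly generated copy and move assignment operators to be deleted. A minimal standalone sketch of that constraint, using hypothetical names unrelated to the Taichi code:

#include <cstdio>

struct Config {
  int opt_level = 2;
};

class Codegen {
 public:
  // The reference must be bound here; there is no way to default-construct
  // Codegen and attach a Config later.
  explicit Codegen(const Config &config) : config_(config) {}

  int opt_level() const { return config_.opt_level; }

 private:
  const Config &config_;  // lives as long as the Codegen; cannot be rebound
};

int main() {
  Config cfg;
  Codegen cg(cfg);  // caller passes the object directly, no address-of
  std::printf("%d\n", cg.opt_level());
  // Codegen other(cfg); other = cg;  // would not compile: assignment deleted
}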
4 changes: 2 additions & 2 deletions taichi/codegen/codegen_utils.h
@@ -3,8 +3,8 @@

namespace taichi::lang {

inline bool codegen_vector_type(const CompileConfig *config) {
return !config->real_matrix_scalarize;
inline bool codegen_vector_type(const CompileConfig &config) {
return !config.real_matrix_scalarize;
}

} // namespace taichi::lang
(Diffs for the remaining changed files are not shown.)
