diff --git a/python/tvm/relay/backend/contrib/ethosu/legalize.py b/python/tvm/relay/backend/contrib/ethosu/legalize.py
index 8f5d6c24f0f6..d52f3ba6eca5 100644
--- a/python/tvm/relay/backend/contrib/ethosu/legalize.py
+++ b/python/tvm/relay/backend/contrib/ethosu/legalize.py
@@ -230,7 +230,7 @@ def __call__(self, *args, **kwargs):

 def sigmoid_calc_func(x: float) -> float:
     """Function to calculate the values for sigmoid"""
-    # Thse limits are inherited from TFLite
+    # These limits are inherited from TFLite
     upper_limit = 8.0
     lower_limit = -8.0
diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py
index 1d4c8ad75762..f8c12ff334db 100755
--- a/python/tvm/relay/frontend/common.py
+++ b/python/tvm/relay/frontend/common.py
@@ -716,11 +716,11 @@ def gru_cell(
     b_inp, b_hid : relay.Expr
         bias matrices. The same order of internal parts as for weights. shape = (3 * hidden_size)
     r_act : relay.op
-        activation funtion for reset gate. it is sigmoid by default
+        activation function for reset gate. it is sigmoid by default
     z_act : relay.op
-        activation funtion for update gate. it is sigmoid by default
+        activation function for update gate. it is sigmoid by default
     n_act : relay.op
-        activation funtion for new gate. it is tanh by default
+        activation function for new gate. it is tanh by default
     backwards : bool
         Flag for reverse pass of GRU

@@ -812,7 +812,7 @@ def lstm_cell(
     p_i, p_f, p_o : relay.Expr
         peephole LSTM matrices. shape = (batch, hidden_size)
     f_act, g_act, h_act : relay.op
-        activation funtions
+        activation functions
     backwards : bool
         Flag for reverse pass of LSTM
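For reference, here is which gate each of those `r_act`/`z_act`/`n_act` defaults belongs to, in the standard GRU formulation the docstring describes. This is a NumPy sketch for illustration only; the stacked r/z/n weight layout is assumed from the docstring, and this is not the Relay implementation:

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_cell(x, h, w_inp, w_hid, r_act=sigmoid, z_act=sigmoid, n_act=np.tanh):
    """One GRU step; w_inp/w_hid stack the r, z, n parts as in the docstring."""
    i_r, i_z, i_n = np.split(w_inp @ x, 3)
    h_r, h_z, h_n = np.split(w_hid @ h, 3)
    r = r_act(i_r + h_r)          # reset gate (sigmoid by default)
    z = z_act(i_z + h_z)          # update gate (sigmoid by default)
    n = n_act(i_n + r * h_n)      # new gate (tanh by default)
    return (1.0 - z) * n + z * h  # next hidden state
```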
diff --git a/python/tvm/runtime/ndarray.py b/python/tvm/runtime/ndarray.py
index 2b9f7f9446ba..8400a5998e39 100644
--- a/python/tvm/runtime/ndarray.py
+++ b/python/tvm/runtime/ndarray.py
@@ -127,7 +127,7 @@ def __setitem__(self, in_slice, value):
             raise TypeError("type %s not supported" % str(type(value)))

     def copyfrom(self, source_array):
-        """Peform an synchronize copy from the array.
+        """Perform a synchronized copy from the array.

         Parameters
         ----------
diff --git a/python/tvm/topi/cuda/batch_matmul_tensorcore.py b/python/tvm/topi/cuda/batch_matmul_tensorcore.py
index d05984b91393..ac16dd7b65b4 100644
--- a/python/tvm/topi/cuda/batch_matmul_tensorcore.py
+++ b/python/tvm/topi/cuda/batch_matmul_tensorcore.py
@@ -203,7 +203,7 @@ def _schedule(cfg, s, C):
     s[BF].reorder(bs, o, i, o_ii, i_ii)

     # Schedule for A's(B's) shared memory load
-    def shared_shedule(stage, strides):
+    def shared_schedule(stage, strides):
         s[stage].compute_at(s[CF], ko)
         bs, xo, yo = stage.op.axis
         s[stage].storage_align(xo, strides - 1, strides)
@@ -217,8 +217,8 @@ def shared_shedule(stage, strides):
         s[stage].bind(tx, thread_x)
         s[stage].vectorize(vi)

-    shared_shedule(AS, AS_align)
-    shared_shedule(BS, BS_align)
+    shared_schedule(AS, AS_align)
+    shared_schedule(BS, BS_align)

     shape = (wmma_m, wmma_n, wmma_k)
     AL_gemm = te.placeholder((wmma_m, wmma_k), name="AL_gemm", dtype=data_dtype)
diff --git a/python/tvm/topi/cuda/conv2d_nhwc_winograd.py b/python/tvm/topi/cuda/conv2d_nhwc_winograd.py
index 1e368f585354..698beeac6dc4 100644
--- a/python/tvm/topi/cuda/conv2d_nhwc_winograd.py
+++ b/python/tvm/topi/cuda/conv2d_nhwc_winograd.py
@@ -165,7 +165,7 @@ def schedule_bgemm_tensorcore(cfg, s, bgemm, data_pack, kernel_pack):
     s[BF].reorder(i, o, i_ii, o_ii)

     # Schedule for A's(B's) shared memory load
-    def shared_shedule(stage, strides):
+    def shared_schedule(stage, strides):
         s[stage].compute_at(s[CF], ko)
         _, _, xo, yo = stage.op.axis
         s[stage].storage_align(xo, strides - 1, strides)
@@ -179,8 +179,8 @@ def shared_shedule(stage, strides):
         s[stage].bind(tx, thread_x)
         s[stage].vectorize(vi)

-    shared_shedule(AS, AS_align)
-    shared_shedule(BS, BS_align)
+    shared_schedule(AS, AS_align)
+    shared_schedule(BS, BS_align)

     shape = (wmma_m, wmma_n, wmma_k)
     in_dtype = "float16"
diff --git a/python/tvm/topi/cuda/dense_tensorcore.py b/python/tvm/topi/cuda/dense_tensorcore.py
index 20ff1aaccc5f..7acc1307f84c 100644
--- a/python/tvm/topi/cuda/dense_tensorcore.py
+++ b/python/tvm/topi/cuda/dense_tensorcore.py
@@ -238,7 +238,7 @@ def _schedule_dense_tensorcore(cfg, s, C):
     s[BF].reorder(o, i, o_ii, i_ii)

     # Schedule for A's(B's) shared memory load
-    def shared_shedule(stage, strides):
+    def shared_schedule(stage, strides):
         s[stage].compute_at(s[CF], ko)
         xo, yo = stage.op.axis
         s[stage].storage_align(xo, strides - 1, strides)
@@ -252,8 +252,8 @@ def shared_shedule(stage, strides):
         s[stage].bind(tx, thread_x)
         s[stage].vectorize(vi)

-    shared_shedule(AS, AS_align)
-    shared_shedule(BS, BS_align)
+    shared_schedule(AS, AS_align)
+    shared_schedule(BS, BS_align)

     shape = (wmma_m, wmma_n, wmma_k)
     AL_gemm = te.placeholder((wmma_m, wmma_k), name="AL_gemm", dtype=data_dtype)
diff --git a/src/arith/int_operator.h b/src/arith/int_operator.h
index eff52308f389..6dec9a5502e1 100644
--- a/src/arith/int_operator.h
+++ b/src/arith/int_operator.h
@@ -78,7 +78,7 @@ inline bool WillOverflow(int64_t x, int64_t y, int64_t min_value,
 }

 /*!
- * \brief Peform trunc division of two integers.
+ * \brief Perform trunc division of two integers.
  * \param x The left operand.
  * \param y The right operand.
  * \return the result.
@@ -94,7 +94,7 @@ inline int64_t truncdiv(int64_t x, int64_t y) { return x / y; }
 inline int64_t truncmod(int64_t x, int64_t y) { return x % y; }

 /*!
- * \brief Peform floor division of two integers.
+ * \brief Perform floor division of two integers.
  * \param x The left operand.
  * \param y The right operand.
  * \return the result.
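The trunc/floor distinction documented above only matters when the operands have mixed signs. A small Python sketch of the semantics, for illustration only (C++ `/` and `%` on integers are trunc-style, while Python's `//` and `%` are floor-style):

```python
def truncdiv(x: int, y: int) -> int:
    """Trunc division rounds the quotient toward zero (C/C++ semantics)."""
    q = abs(x) // abs(y)
    return q if (x < 0) == (y < 0) else -q

def truncmod(x: int, y: int) -> int:
    """Trunc remainder takes the sign of the dividend, like C's '%'."""
    return x - truncdiv(x, y) * y

assert truncdiv(-7, 2) == -3 and truncmod(-7, 2) == -1  # round toward zero
assert (-7) // 2 == -4 and (-7) % 2 == 1                # floor: round toward -inf
```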
diff --git a/src/relay/backend/te_compiler_cache.cc b/src/relay/backend/te_compiler_cache.cc
index 5ed93914ac53..4b77cb14d48b 100644
--- a/src/relay/backend/te_compiler_cache.cc
+++ b/src/relay/backend/te_compiler_cache.cc
@@ -192,7 +192,7 @@ class ScheduleBuilder : public backend::MemoizedExprTranslator<CachedFunc> {
       }
     }

-    // Use TOPI schdule if user specificed, or the function has no auto_scheduler schedule.
+    // Use TOPI schedule if user specified, or the function has no auto_scheduler schedule.
     if (!schedule.defined() && !prim_func.defined()) {
       ICHECK(anchor_implementation_.defined());
       schedule = anchor_implementation_.Schedule(anchor_attrs_, tensor_outs, target_);
diff --git a/src/runtime/rpc/rpc_local_session.h b/src/runtime/rpc/rpc_local_session.h
index d1b54d5be65b..a081cf97db4a 100644
--- a/src/runtime/rpc/rpc_local_session.h
+++ b/src/runtime/rpc/rpc_local_session.h
@@ -60,7 +60,7 @@ class LocalSession : public RPCSession {

 protected:
  /*!
-  * \brief internal encode return fucntion.
+  * \brief internal encode return function.
   * \param rv The return value.
   * \param encode_return The encoding function.
   */
diff --git a/src/support/ring_buffer.h b/src/support/ring_buffer.h
index af814158f7b6..1c6a6f8b4350 100644
--- a/src/support/ring_buffer.h
+++ b/src/support/ring_buffer.h
@@ -87,7 +87,7 @@ class RingBuffer {
   }

   /*!
-   * \brief Peform a non-blocking read from buffer
+   * \brief Perform a non-blocking read from buffer
    *  size must be smaller than this->bytes_available()
    * \param data the data pointer.
    * \param size The number of bytes to read.
diff --git a/src/support/socket.h b/src/support/socket.h
index a83a67c85d76..42d5d9004c15 100644
--- a/src/support/socket.h
+++ b/src/support/socket.h
@@ -516,7 +516,7 @@ class TCPSocket : public Socket {
         [&]() { return recv(sockfd, buf, static_cast<size_t>(len), flags); });
   }
   /*!
-   * \brief peform block write that will attempt to send all data out
+   * \brief perform block write that will attempt to send all data out
    *  can still return smaller than request when error occurs
    * \param buf_ the pointer to the buffer
    * \param len the size of the buffer
@@ -538,7 +538,7 @@ class TCPSocket : public Socket {
     return ndone;
   }
   /*!
-   * \brief peform block read that will attempt to read all data
+   * \brief perform block read that will attempt to read all data
    *  can still return smaller than request when error occurs
    * \param buf_ the buffer pointer
    * \param len length of data to recv
@@ -654,7 +654,7 @@ struct PollHelper {
   }

   /*!
-   * \brief peform poll on the set defined, read, write, exception
+   * \brief perform poll on the set defined, read, write, exception
    * \param timeout specify timeout in milliseconds(ms) if negative, means poll will block
    * \return
    */
diff --git a/src/te/operation/create_primfunc.cc b/src/te/operation/create_primfunc.cc
index 5de0538960fc..94dd0b044d71 100644
--- a/src/te/operation/create_primfunc.cc
+++ b/src/te/operation/create_primfunc.cc
@@ -282,7 +282,7 @@ PrimFunc CreatePrimFunc(const Array<te::Tensor>& arg_list) {
         << "Only te.placeholder and te.compute are allowed for now.";
   }

-  // Infomations used in CreatePrimFunc and its sub-funtions.
+  // Information used in CreatePrimFunc and its sub-functions.
   CreateFuncInfo info(arg_list);
   // Root body stmts.
   Array<Stmt> root_stmts;
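The SendAll/RecvAll contract in the socket.h comments above (loop until everything is transferred, return a short count only on error) is the classic blocking-I/O pattern. A Python sketch of the same idea, for illustration only; this is not the TVM C++ code:

```python
import socket

def send_all(sock: socket.socket, data: bytes) -> int:
    """Keep calling send() until all bytes are out; a short count signals error/close."""
    ndone = 0
    while ndone < len(data):
        sent = sock.send(data[ndone:])
        if sent == 0:  # peer closed the connection
            break
        ndone += sent
    return ndone
```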
diff --git a/src/te/operation/tensorize.cc b/src/te/operation/tensorize.cc
index 9e2d3d0e725f..b31b61b739c1 100644
--- a/src/te/operation/tensorize.cc
+++ b/src/te/operation/tensorize.cc
@@ -35,7 +35,7 @@ namespace te {

 using namespace tir;

-// Detect the region of input and output to be tensrized.
+// Detect the region of input and output to be tensorized.
 // out_dom: the domain of root iter vars in output op
 // in_region: region of each input tensor.
 // return The location of the tensorized scope start.
diff --git a/src/tir/schedule/primitive/cache_read_write.cc b/src/tir/schedule/primitive/cache_read_write.cc
index 159171ecae31..4a80279d97cb 100644
--- a/src/tir/schedule/primitive/cache_read_write.cc
+++ b/src/tir/schedule/primitive/cache_read_write.cc
@@ -635,7 +635,7 @@ StmtSRef CacheRead(ScheduleState self, const StmtSRef& block_sref, int read_buff
                                              /*require_subtree_compact_dataflow=*/false);
   const BlockNode* scope_block = TVM_SREF_TO_BLOCK(scope_block, scope_sref);

-  // Step 2. Creat CacheStageInfo
+  // Step 2. Create CacheStageInfo
   CacheStageInfo info;
   info.read_buffer = read_buffer;
   // Create the corresponding buffer to be written, i.e. result of cache_read
diff --git a/src/tir/transforms/coproc_sync.cc b/src/tir/transforms/coproc_sync.cc
index 424a1bbb0ae6..7a6d2d37c376 100644
--- a/src/tir/transforms/coproc_sync.cc
+++ b/src/tir/transforms/coproc_sync.cc
@@ -450,7 +450,7 @@ class CoProcInstDepDetector : public StmtVisitor {
    std::unordered_set<const VarNode*> exit_ctx;
    // existing pop performed at enter
    std::vector<std::pair<int, int> > enter_pop;
-   // existing push peformed at exit
+   // existing push performed at exit
    std::vector<std::pair<int, int> > exit_push;
    // clear the state
    void clear() {
diff --git a/web/src/compact.ts b/web/src/compact.ts
index 29569b5d005d..ac6af35abeff 100644
--- a/web/src/compact.ts
+++ b/web/src/compact.ts
@@ -19,9 +19,9 @@
 /** NodeJS and Web compact layer */

 /**
- * Get performance masurement.
+ * Get performance measurement.
  */
-export function getPeformance(): Performance {
+export function getPerformance(): Performance {
   if (typeof performance == "undefined") {
     // eslint-disable-next-line @typescript-eslint/no-var-requires
     const performanceNode = require("perf_hooks");
diff --git a/web/src/runtime.ts b/web/src/runtime.ts
index 60a28d53f361..b0e71d945f8a 100644
--- a/web/src/runtime.ts
+++ b/web/src/runtime.ts
@@ -653,7 +653,7 @@ class GraphExecutor implements Disposable {
    */
  async benchmarkRuns(dev: DLDevice, number=10, repeat=4): Promise<number[]> {
    // Skip first run as it can involve GPU warmup and module loading time.
-    const perf = compact.getPeformance();
+    const perf = compact.getPerformance();
    const results = [];
    this.run();
    await dev.sync();
@@ -1049,7 +1049,7 @@ export class Instance implements Disposable {
  /** Register global packed functions needed by the backend to the env. */
  private registerEnvGlobalPackedFuncs(): void {
    // Register the timer function to enable the time_evaluator.
-    const perf = compact.getPeformance();
+    const perf = compact.getPerformance();
    // Helper function to time the finvoke
    const timeExecution = async (
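As a footnote on the benchmarkRuns hunk above: the warmup-then-measure pattern it implements is language-agnostic. A hypothetical Python equivalent for illustration; this is not TVM's time_evaluator:

```python
import time

def benchmark_runs(run, number=10, repeat=4):
    """Mirror benchmarkRuns: one warmup call, then `repeat` batches of `number` runs."""
    run()  # skip the first run: it can include warmup and module loading time
    results = []
    for _ in range(repeat):
        start = time.perf_counter()
        for _ in range(number):
            run()
        results.append((time.perf_counter() - start) / number * 1e3)  # avg ms per run
    return results
```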