[Minor] Typo Fixes (apache#10000)
* Fix typos.

* Missed funtion -> function.
zxybazh authored and ylc committed Feb 16, 2022
1 parent db91395 · commit 5356739
Showing 17 changed files with 31 additions and 31 deletions.
2 changes: 1 addition & 1 deletion python/tvm/relay/backend/contrib/ethosu/legalize.py
@@ -230,7 +230,7 @@ def __call__(self, *args, **kwargs):

def sigmoid_calc_func(x: float) -> float:
"""Function to calculate the values for sigmoid"""
-# Thse limits are inherited from TFLite
+# These limits are inherited from TFLite
upper_limit = 8.0
lower_limit = -8.0

8 changes: 4 additions & 4 deletions python/tvm/relay/frontend/common.py
@@ -716,11 +716,11 @@ def gru_cell(
b_inp, b_hid : relay.Expr
bias matrices. The same order of internal parts as for weights. shape = (3 * hidden_size)
r_act : relay.op
-activation funtion for reset gate. it is sigmoid by default
+activation function for reset gate. it is sigmoid by default
z_act : relay.op
-activation funtion for update gate. it is sigmoid by default
+activation function for update gate. it is sigmoid by default
n_act : relay.op
-activation funtion for new gate. it is tanh by default
+activation function for new gate. it is tanh by default
backwards : bool
Flag for reverse pass of GRU
@@ -812,7 +812,7 @@ def lstm_cell(
p_i, p_f, p_o : relay.Expr
peephole LSTM matrices. shape = (batch, hidden_size)
f_act, g_act, h_act : relay.op
-activation funtions
+activation functions
backwards : bool
Flag for reverse pass of LSTM
2 changes: 1 addition & 1 deletion python/tvm/runtime/ndarray.py
@@ -127,7 +127,7 @@ def __setitem__(self, in_slice, value):
raise TypeError("type %s not supported" % str(type(value)))

def copyfrom(self, source_array):
"""Peform an synchronize copy from the array.
"""Perform an synchronize copy from the array.
Parameters
----------
6 changes: 3 additions & 3 deletions python/tvm/topi/cuda/batch_matmul_tensorcore.py
@@ -203,7 +203,7 @@ def _schedule(cfg, s, C):
s[BF].reorder(bs, o, i, o_ii, i_ii)

# Schedule for A's(B's) shared memory load
-def shared_shedule(stage, strides):
+def shared_schedule(stage, strides):
s[stage].compute_at(s[CF], ko)
bs, xo, yo = stage.op.axis
s[stage].storage_align(xo, strides - 1, strides)
@@ -217,8 +217,8 @@ def shared_shedule(stage, strides):
s[stage].bind(tx, thread_x)
s[stage].vectorize(vi)

-shared_shedule(AS, AS_align)
-shared_shedule(BS, BS_align)
+shared_schedule(AS, AS_align)
+shared_schedule(BS, BS_align)

shape = (wmma_m, wmma_n, wmma_k)
AL_gemm = te.placeholder((wmma_m, wmma_k), name="AL_gemm", dtype=data_dtype)
6 changes: 3 additions & 3 deletions python/tvm/topi/cuda/conv2d_nhwc_winograd.py
@@ -165,7 +165,7 @@ def schedule_bgemm_tensorcore(cfg, s, bgemm, data_pack, kernel_pack):
s[BF].reorder(i, o, i_ii, o_ii)

# Schedule for A's(B's) shared memory load
-def shared_shedule(stage, strides):
+def shared_schedule(stage, strides):
s[stage].compute_at(s[CF], ko)
_, _, xo, yo = stage.op.axis
s[stage].storage_align(xo, strides - 1, strides)
@@ -179,8 +179,8 @@ def shared_shedule(stage, strides):
s[stage].bind(tx, thread_x)
s[stage].vectorize(vi)

-shared_shedule(AS, AS_align)
-shared_shedule(BS, BS_align)
+shared_schedule(AS, AS_align)
+shared_schedule(BS, BS_align)

shape = (wmma_m, wmma_n, wmma_k)
in_dtype = "float16"
6 changes: 3 additions & 3 deletions python/tvm/topi/cuda/dense_tensorcore.py
@@ -238,7 +238,7 @@ def _schedule_dense_tensorcore(cfg, s, C):
s[BF].reorder(o, i, o_ii, i_ii)

# Schedule for A's(B's) shared memory load
-def shared_shedule(stage, strides):
+def shared_schedule(stage, strides):
s[stage].compute_at(s[CF], ko)
xo, yo = stage.op.axis
s[stage].storage_align(xo, strides - 1, strides)
@@ -252,8 +252,8 @@ def shared_shedule(stage, strides):
s[stage].bind(tx, thread_x)
s[stage].vectorize(vi)

-shared_shedule(AS, AS_align)
-shared_shedule(BS, BS_align)
+shared_schedule(AS, AS_align)
+shared_schedule(BS, BS_align)

shape = (wmma_m, wmma_n, wmma_k)
AL_gemm = te.placeholder((wmma_m, wmma_k), name="AL_gemm", dtype=data_dtype)
4 changes: 2 additions & 2 deletions src/arith/int_operator.h
@@ -78,7 +78,7 @@ inline bool WillOverflow<tir::ModNode>(int64_t x, int64_t y, int64_t min_value,
}

/*!
-* \brief Peform trunc division of two integers.
+* \brief Perform trunc division of two integers.
* \param x The left operand.
* \param y The right operand.
* \return the result.
@@ -94,7 +94,7 @@ inline int64_t truncdiv(int64_t x, int64_t y) { return x / y; }
inline int64_t truncmod(int64_t x, int64_t y) { return x % y; }

/*!
-* \brief Peform floor division of two integers.
+* \brief Perform floor division of two integers.
* \param x The left operand.
* \param y The right operand.
* \return the result.
2 changes: 1 addition & 1 deletion src/relay/backend/te_compiler_cache.cc
@@ -192,7 +192,7 @@ class ScheduleBuilder : public backend::MemoizedExprTranslator<Array<te::Tensor>
}
}

-// Use TOPI schdule if user specificed, or the function has no auto_scheduler schedule.
+// Use TOPI schedule if user specificed, or the function has no auto_scheduler schedule.
if (!schedule.defined() && !prim_func.defined()) {
ICHECK(anchor_implementation_.defined());
schedule = anchor_implementation_.Schedule(anchor_attrs_, tensor_outs, target_);
2 changes: 1 addition & 1 deletion src/runtime/rpc/rpc_local_session.h
@@ -60,7 +60,7 @@ class LocalSession : public RPCSession {

protected:
/*!
-* \brief internal encode return fucntion.
+* \brief internal encode return function.
* \param rv The return value.
* \param encode_return The encoding function.
*/
2 changes: 1 addition & 1 deletion src/support/ring_buffer.h
@@ -87,7 +87,7 @@ class RingBuffer {
}

/*!
-* \brief Peform a non-blocking read from buffer
+* \brief Perform a non-blocking read from buffer
* size must be smaller than this->bytes_available()
* \param data the data pointer.
* \param size The number of bytes to read.
6 changes: 3 additions & 3 deletions src/support/socket.h
@@ -516,7 +516,7 @@ class TCPSocket : public Socket {
[&]() { return recv(sockfd, buf, static_cast<sock_size_t>(len), flags); });
}
/*!
-* \brief peform block write that will attempt to send all data out
+* \brief perform block write that will attempt to send all data out
* can still return smaller than request when error occurs
* \param buf_ the pointer to the buffer
* \param len the size of the buffer
@@ -538,7 +538,7 @@ class TCPSocket : public Socket {
return ndone;
}
/*!
-* \brief peform block read that will attempt to read all data
+* \brief perform block read that will attempt to read all data
* can still return smaller than request when error occurs
* \param buf_ the buffer pointer
* \param len length of data to recv
@@ -654,7 +654,7 @@ struct PollHelper {
}

/*!
-* \brief peform poll on the set defined, read, write, exception
+* \brief perform poll on the set defined, read, write, exception
* \param timeout specify timeout in milliseconds(ms) if negative, means poll will block
* \return
*/
2 changes: 1 addition & 1 deletion src/te/operation/create_primfunc.cc
@@ -282,7 +282,7 @@ PrimFunc CreatePrimFunc(const Array<te::Tensor>& arg_list) {
<< "Only te.placeholder and te.compute are allowed for now.";
}

-// Infomations used in CreatePrimFunc and its sub-funtions.
+// Infomations used in CreatePrimFunc and its sub-functions.
CreateFuncInfo info(arg_list);
// Root body stmts.
Array<Stmt> root_stmts;
2 changes: 1 addition & 1 deletion src/te/operation/tensorize.cc
@@ -35,7 +35,7 @@ namespace te {

using namespace tir;

-// Detect the region of input and output to be tensrized.
+// Detect the region of input and output to be tensorized.
// out_dom: the domain of root iter vars in output op
// in_region: region of each input tensor.
// return The location of the tensorized scope start.
2 changes: 1 addition & 1 deletion src/tir/schedule/primitive/cache_read_write.cc
@@ -635,7 +635,7 @@ StmtSRef CacheRead(ScheduleState self, const StmtSRef& block_sref, int read_buff
/*require_subtree_compact_dataflow=*/false);
const BlockNode* scope_block = TVM_SREF_TO_BLOCK(scope_block, scope_sref);

-// Step 2. Creat CacheStageInfo
+// Step 2. Create CacheStageInfo
CacheStageInfo info;
info.read_buffer = read_buffer;
// Create the corresponding buffer to be written, i.e. result of cache_read
2 changes: 1 addition & 1 deletion src/tir/transforms/coproc_sync.cc
@@ -450,7 +450,7 @@ class CoProcInstDepDetector : public StmtVisitor {
std::unordered_set<int> exit_ctx;
// existing pop performed at enter
std::vector<std::pair<int, int> > enter_pop;
-// existing push peformed at exit
+// existing push performed at exit
std::vector<std::pair<int, int> > exit_push;
// clear the state
void clear() {
4 changes: 2 additions & 2 deletions web/src/compact.ts
@@ -19,9 +19,9 @@
/** NodeJS and Web compact layer */

/**
-* Get performance masurement.
+* Get performance measurement.
*/
-export function getPeformance(): Performance {
+export function getPerformance(): Performance {
if (typeof performance == "undefined") {
// eslint-disable-next-line @typescript-eslint/no-var-requires
const performanceNode = require("perf_hooks");
4 changes: 2 additions & 2 deletions web/src/runtime.ts
@@ -653,7 +653,7 @@ class GraphExecutor implements Disposable {
*/
async benchmarkRuns(dev: DLDevice, number=10, repeat=4): Promise<number[]> {
// Skip first run as it can involve GPU warmup and module loading time.
-const perf = compact.getPeformance();
+const perf = compact.getPerformance();
const results = [];
this.run();
await dev.sync();
@@ -1049,7 +1049,7 @@ export class Instance implements Disposable {
/** Register global packed functions needed by the backend to the env. */
private registerEnvGlobalPackedFuncs(): void {
// Register the timer function to enable the time_evaluator.
-const perf = compact.getPeformance();
+const perf = compact.getPerformance();

// Helper function to time the finvoke
const timeExecution = async (
