Run pgindent.
Sravan Velagandula.
jeevanchalke committed Dec 1, 2022
1 parent 9872baf commit c3667b7
Showing 3 changed files with 21 additions and 20 deletions.
hdfs_deparse.c (15 changes: 8 additions & 7 deletions)
@@ -1518,10 +1518,10 @@ hdfs_deparse_const(Const *node, deparse_expr_cxt *context)
 case FLOAT4OID:
 case FLOAT8OID:
 case NUMERICOID:
-if (context->is_limit_node)
-appendStringInfoString(buf, extval);
-else
-appendStringInfo(buf, "'%s'", extval);
+if (context->is_limit_node)
+appendStringInfoString(buf, extval);
+else
+appendStringInfo(buf, "'%s'", extval);
 break;
 case BITOID:
 case VARBITOID:
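
The hunk above only re-indents this branch, but it documents a deparsing rule worth noting: numeric constants are normally emitted single-quoted in the remote query, while a constant that is part of a LIMIT clause has to be emitted bare. Below is a minimal standalone sketch of that rule (plain C, no PostgreSQL headers; deparse_cxt and emit_numeric are illustrative names, not the extension's API):

#include <stdio.h>

/* Simplified stand-in for the deparse context flag used above. */
struct deparse_cxt
{
    int     is_limit_node;      /* nonzero while deparsing LIMIT/OFFSET */
};

/*
 * Emit a numeric constant the way the branch above does: single-quoted in
 * ordinary expressions, bare when it is a LIMIT/OFFSET value.
 */
static void
emit_numeric(const struct deparse_cxt *cxt, const char *extval,
             char *out, size_t outlen)
{
    if (cxt->is_limit_node)
        snprintf(out, outlen, "%s", extval);
    else
        snprintf(out, outlen, "'%s'", extval);
}

int
main(void)
{
    char    buf[32];
    struct deparse_cxt in_where = {0};
    struct deparse_cxt in_limit = {1};

    emit_numeric(&in_where, "3.14", buf, sizeof(buf));
    printf("WHERE num_col = %s\n", buf);    /* WHERE num_col = '3.14' */

    emit_numeric(&in_limit, "10", buf, sizeof(buf));
    printf("LIMIT %s\n", buf);              /* LIMIT 10 */
    return 0;
}

Any C99 compiler will run this; it prints the quoted form for an ordinary comparison and the bare form for a LIMIT value, mirroring the two branches above.
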
@@ -2392,7 +2392,8 @@ hdfs_append_limit_clause(deparse_expr_cxt *context)
 if (root->parse->limitCount)
 {
 StringInfo buf = context->buf;
-Const *c = (Const *) root->parse->limitOffset;
+Const *c = (Const *) root->parse->limitOffset;
+
 appendStringInfoString(buf, " LIMIT ");
 context->is_limit_node = true;

@@ -2440,8 +2441,8 @@ hdfs_is_foreign_pathkey(PlannerInfo *root, RelOptInfo *baserel,
 EquivalenceClass *pathkey_ec = pathkey->pk_eclass;

 /*
- * hdfs_is_foreign_expr would detect volatile expressions as well,
- * but checking ec_has_volatile here saves some cycles.
+ * hdfs_is_foreign_expr would detect volatile expressions as well, but
+ * checking ec_has_volatile here saves some cycles.
  */
 if (pathkey_ec->ec_has_volatile)
 return false;
hdfs_fdw.c (20 changes: 10 additions & 10 deletions)
@@ -1882,10 +1882,10 @@ hdfs_build_whole_row_constr_info(hdfsFdwExecutionState *festate,

 /*
  * Construct the array mapping columns in the ForeignScan node output to
- * their positions in the result fetched from the foreign server.
- * Positive values indicate the locations in the result and negative
- * values indicate the range table indexes of the base table whose
- * whole-row reference values are requested in that place.
+ * their positions in the result fetched from the foreign server. Positive
+ * values indicate the locations in the result and negative values
+ * indicate the range table indexes of the base table whose whole-row
+ * reference values are requested in that place.
  */
 fs_num_atts = list_length(fdw_scan_tlist);
 fs_attr_pos = (int *) palloc(sizeof(int) * fs_num_atts);
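
The reflowed comment describes the fs_attr_pos mapping built here: a positive entry points at a column of the result fetched from the remote server, while a negative entry carries the range table index of the base table whose whole-row value must be assembled in that slot. A self-contained sketch of that sign convention (plain C; the array contents and the 1-based positions are made up for illustration and are not the extension's actual structures):

#include <stdio.h>

int
main(void)
{
    /*
     * Hypothetical mapping for a four-column ForeignScan output list:
     * a positive value is a (here 1-based) position in the fetched remote
     * result, a negative value is the range table index of the base table
     * whose whole-row reference must be built for that slot.
     */
    int     fs_attr_pos[] = {1, 2, -1, 3};
    int     fs_num_atts = (int) (sizeof(fs_attr_pos) / sizeof(fs_attr_pos[0]));
    int     i;

    for (i = 0; i < fs_num_atts; i++)
    {
        if (fs_attr_pos[i] > 0)
            printf("output column %d <- remote result column %d\n",
                   i + 1, fs_attr_pos[i]);
        else
            printf("output column %d <- whole-row value of rtindex %d\n",
                   i + 1, -fs_attr_pos[i]);
    }
    return 0;
}
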
@@ -2296,7 +2296,7 @@ hdfsGetForeignUpperPaths(PlannerInfo *root, UpperRelationKind stage,
 break;
 case UPPERREL_FINAL:
 hdfs_add_foreign_final_paths(root, input_rel, output_rel,
-(FinalPathExtraData *)extra);
+(FinalPathExtraData *) extra);
 break;
 default:
 elog(ERROR, "unexpected upper relation: %d", (int) stage);
@@ -3102,8 +3102,8 @@ hdfs_add_foreign_final_paths(PlannerInfo *root, RelOptInfo *input_rel,
 /*
  * hdfs doesn't support LIMIT/OFFSET NULL/ALL syntax, however, only check
  * for LIMIT NULL/ALL because one can specify a valid LIMIT value with
- * OFFSET NULL. This can be handled by checking for OFFSET NULL
- * during deparse and ignoring it. If const LIMIT node is null then do not
+ * OFFSET NULL. This can be handled by checking for OFFSET NULL during
+ * deparse and ignoring it. If const LIMIT node is null then do not
  * pushdown limit/offset clause.
  */
 if (parse->limitCount)
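
The comment spells out the pushdown rule for LIMIT/OFFSET: the clause is only pushed down when the planner provides a non-NULL LIMIT constant, because LIMIT NULL/ALL cannot be expressed for the remote side, whereas an OFFSET NULL can simply be ignored while deparsing. A hedged sketch of that decision using toy stand-ins for the Query and Const nodes (not the extension's real types):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the planner nodes the real check consults. */
struct toy_const
{
    bool    constisnull;        /* true for LIMIT NULL / LIMIT ALL */
};

struct toy_query
{
    struct toy_const *limitCount;   /* NULL pointer: no LIMIT clause */
    struct toy_const *limitOffset;  /* may be absent or OFFSET NULL */
};

/* Decide whether the LIMIT/OFFSET clause may be pushed to the remote server. */
static bool
can_pushdown_limit(const struct toy_query *parse)
{
    if (parse->limitCount == NULL)
        return false;           /* no LIMIT at all: nothing to push down */
    if (parse->limitCount->constisnull)
        return false;           /* LIMIT NULL/ALL cannot be deparsed */
    return true;                /* OFFSET NULL is simply ignored in deparse */
}

int
main(void)
{
    struct toy_const ten = {false};
    struct toy_const all = {true};
    struct toy_query q_limit_10 = {&ten, NULL};
    struct toy_query q_limit_all = {&all, NULL};

    printf("LIMIT 10  -> pushdown: %d\n", can_pushdown_limit(&q_limit_10));
    printf("LIMIT ALL -> pushdown: %d\n", can_pushdown_limit(&q_limit_all));
    return 0;
}
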
@@ -3163,7 +3163,7 @@ hdfs_add_foreign_final_paths(PlannerInfo *root, RelOptInfo *input_rel,
 /* and add it to the final_rel */
 add_path(final_rel, (Path *) final_path);
 }
-#endif /* PG_VERSION_NUM >= 120000 */
+#endif /* PG_VERSION_NUM >= 120000 */

 /*
  * hdfs_get_sortby_direction_string
@@ -3201,8 +3201,8 @@ hdfs_get_sortby_direction_string(EquivalenceMember *em, PathKey *pathkey)
  * Here we need to use the expression's actual type to discover whether
  * the desired operator will be the default or not.
  */
-typentry = lookup_type_cache(exprType((Node *)em->em_expr),
-TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
+typentry = lookup_type_cache(exprType((Node *) em->em_expr),
+TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);

 if (oprid == typentry->lt_opr)
 return "ASC";
hdfs_query.c (6 changes: 3 additions & 3 deletions)
@@ -158,9 +158,9 @@ hdfs_describe(int con_index, hdfs_opt *opt, Relation rel)
  * hdfs_deparse_describe() sends a query of the form "DESCRIBE FORMATTED
  * sometab" to the remote server. This produces the output in the
  * columnar format. The 'totalSize' is placed in the 1st column (indexed
- * by 0) and its value is placed in the 2nd column of the same row.
- * Hence, we directly search for the 1st column of each row until we find
- * the 'totalSize', and once we find that, only then we retrieve the 2nd
+ * by 0) and its value is placed in the 2nd column of the same row. Hence,
+ * we directly search for the 1st column of each row until we find the
+ * 'totalSize', and once we find that, only then we retrieve the 2nd
  * column of that row and break.
  */
 while (hdfs_fetch(con_index) == 0)
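
The comment explains how the table size estimate is pulled out of Hive's DESCRIBE FORMATTED output: each fetched row's first column is compared against 'totalSize', and only for the matching row is the second column read before the loop breaks. A standalone sketch of that scan over a mocked result set (plain C; the rows and values are fabricated for illustration, and the real code iterates with hdfs_fetch() instead of an array):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * A few mocked rows of "DESCRIBE FORMATTED" output: column 0 holds the
 * parameter name, column 1 its value.  Real output has many more rows;
 * only the shape matters here.
 */
static const char *mock_rows[][2] = {
    {"numFiles", "4"},
    {"numRows", "1000"},
    {"totalSize", "524288"},
    {"rawDataSize", "480000"},
};

int
main(void)
{
    long    total_size = -1;
    size_t  nrows = sizeof(mock_rows) / sizeof(mock_rows[0]);
    size_t  i;

    /* Check column 0 of each row; read column 1 only on a match, then stop. */
    for (i = 0; i < nrows; i++)
    {
        if (strcmp(mock_rows[i][0], "totalSize") == 0)
        {
            total_size = strtol(mock_rows[i][1], NULL, 10);
            break;
        }
    }
    printf("totalSize = %ld bytes\n", total_size);
    return 0;
}
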
