diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index eabf7460182..77677150aff 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -615,46 +615,45 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
         * empty.
         */
        newNodeBuffer = gistGetNodeBuffer(gfbb, giststate,
                                          BufferGetBlockNumber(si->buf),
                                          level);
        relocationBuffersInfos[i].nodeBuffer = newNodeBuffer;
        relocationBuffersInfos[i].splitinfo = si;
        i++;
    }

    /*
     * Loop through all index tuples in the buffer of the page being split,
     * moving them to buffers for the new pages.  We try to move each tuple to
     * the page that will result in the lowest penalty for the leading column
     * or, in the case of a tie, the lowest penalty for the earliest column
     * that is not tied.
     *
     * The page searching logic is very similar to gistchoose().
     */
    while (gistPopItupFromNodeBuffer(gfbb, &oldBuf, &itup))
    {
        float       best_penalty[INDEX_MAX_KEYS];
-       int         i,
-                   which;
+       int         which;
        IndexTuple  newtup;
        RelocationBufferInfo *targetBufferInfo;

        gistDeCompressAtt(giststate, r,
                          itup, NULL, (OffsetNumber) 0,
                          entry, isnull);

        /* default to using first page (shouldn't matter) */
        which = 0;

        /*
         * best_penalty[j] is the best penalty we have seen so far for column
         * j, or -1 when we haven't yet examined column j.  Array entries to
         * the right of the first -1 are undefined.
         */
        best_penalty[0] = -1;

        /*
         * Loop over possible target pages, looking for one to move this tuple
         * to.
         */
        for (i = 0; i < splitPagesCount; i++)
        {
diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c
index 2e68303cbfd..e88213c7425 100644
--- a/src/backend/access/hash/hash_xlog.c
+++ b/src/backend/access/hash/hash_xlog.c
@@ -221,45 +221,44 @@ hash_xlog_add_ovfl_page(XLogReaderState *record)
        PageSetLSN(leftpage, lsn);
        MarkBufferDirty(leftbuf);
    }

    if (BufferIsValid(leftbuf))
        UnlockReleaseBuffer(leftbuf);
    UnlockReleaseBuffer(ovflbuf);

    /*
     * Note: in normal operation, we'd update the bitmap and meta page while
     * still holding lock on the overflow pages.  But during replay it's not
     * necessary to hold those locks, since no other index updates can be
     * happening concurrently.
     */
    if (XLogRecHasBlockRef(record, 2))
    {
        Buffer      mapbuffer;

        if (XLogReadBufferForRedo(record, 2, &mapbuffer) == BLK_NEEDS_REDO)
        {
            Page        mappage = (Page) BufferGetPage(mapbuffer);
            uint32     *freep = NULL;
-           char       *data;
            uint32     *bitmap_page_bit;

            freep = HashPageGetBitmap(mappage);

            data = XLogRecGetBlockData(record, 2, &datalen);
            bitmap_page_bit = (uint32 *) data;

            SETBIT(freep, *bitmap_page_bit);

            PageSetLSN(mappage, lsn);
            MarkBufferDirty(mapbuffer);
        }
        if (BufferIsValid(mapbuffer))
            UnlockReleaseBuffer(mapbuffer);
    }

    if (XLogRecHasBlockRef(record, 3))
    {
        Buffer      newmapbuf;

        newmapbuf = XLogInitBufferForRedo(record, 3);
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index aab8d6fa4e5..3133d1e0585 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6256,45 +6256,45 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
        return multi;
    }

    /*
     * Do a more thorough second pass over the multi to figure out which
     * member XIDs actually need to be kept.  Checking the precise status of
     * individual members might even show that we don't need to keep anything.
     */
    nnewmembers = 0;
    newmembers = palloc(sizeof(MultiXactMember) * nmembers);
    has_lockers = false;
    update_xid = InvalidTransactionId;
    update_committed = false;
    temp_xid_out = *mxid_oldest_xid_out;    /* init for FRM_RETURN_IS_MULTI */

    for (i = 0; i < nmembers; i++)
    {
        /*
         * Determine whether to keep this member or ignore it.
         */
        if (ISUPDATE_from_mxstatus(members[i].status))
        {
-           TransactionId xid = members[i].xid;
+           xid = members[i].xid;

            Assert(TransactionIdIsValid(xid));
            if (TransactionIdPrecedes(xid, relfrozenxid))
                ereport(ERROR,
                        (errcode(ERRCODE_DATA_CORRUPTED),
                         errmsg_internal("found update xid %u from before relfrozenxid %u",
                                         xid, relfrozenxid)));

            /*
             * It's an update; should we keep it?  If the transaction is known
             * aborted or crashed then it's okay to ignore it, otherwise not.
             * Note that an updater older than cutoff_xid cannot possibly be
             * committed, because HeapTupleSatisfiesVacuum would have returned
             * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
             *
             * As with all tuple visibility routines, it's critical to test
             * TransactionIdIsInProgress before TransactionIdDidCommit,
             * because of race conditions explained in detail in
             * heapam_visibility.c.
             */
            if (TransactionIdIsCurrentTransactionId(xid) ||
                TransactionIdIsInProgress(xid))
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 8f7d12950e5..ec57f56adf3 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -1595,45 +1595,44 @@ mXactCachePut(MultiXactId multi, int nmembers, MultiXactMember *members)
        debug_elog2(DEBUG2, "CachePut: initializing memory context");
        MXactContext = AllocSetContextCreate(TopTransactionContext,
                                             "MultiXact cache context",
                                             ALLOCSET_SMALL_SIZES);
    }

    entry = (mXactCacheEnt *)
        MemoryContextAlloc(MXactContext,
                           offsetof(mXactCacheEnt, members) +
                           nmembers * sizeof(MultiXactMember));
    entry->multi = multi;
    entry->nmembers = nmembers;
    memcpy(entry->members, members, nmembers * sizeof(MultiXactMember));

    /* mXactCacheGetBySet assumes the entries are sorted, so sort them */
    qsort(entry->members, nmembers, sizeof(MultiXactMember), mxactMemberComparator);

    dlist_push_head(&MXactCache, &entry->node);
    if (MXactCacheMembers++ >= MAX_CACHE_ENTRIES)
    {
        dlist_node *node;
-       mXactCacheEnt *entry;

        node = dlist_tail_node(&MXactCache);
        dlist_delete(node);
        MXactCacheMembers--;

        entry = dlist_container(mXactCacheEnt, node, node);
        debug_elog3(DEBUG2, "CachePut: pruning cached multi %u",
                    entry->multi);

        pfree(entry);
    }
}

static char *
mxstatus_to_string(MultiXactStatus status)
{
    switch (status)
    {
        case MultiXactStatusForKeyShare:
            return "keysh";
        case MultiXactStatusForShare:
            return "sh";
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index a090cada400..537845cada7 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -4701,45 +4701,44 @@ XLogInitNewTimeline(TimeLineID endTLI, XLogRecPtr endOfLog, TimeLineID newTLI)
        /*
         * Make a copy of the file on the new timeline.
         *
         * Writing WAL isn't allowed yet, so there are no locking
         * considerations.  But we should be just as tense as XLogFileInit to
         * avoid emplacing a bogus file.
         */
        XLogFileCopy(newTLI, endLogSegNo, endTLI, endLogSegNo,
                     XLogSegmentOffset(endOfLog, wal_segment_size));
    }
    else
    {
        /*
         * The switch happened at a segment boundary, so just create the next
         * segment on the new timeline.
         */
        int         fd;

        fd = XLogFileInit(startLogSegNo, newTLI);

        if (close(fd) != 0)
        {
-           char        xlogfname[MAXFNAMELEN];
            int         save_errno = errno;

            XLogFileName(xlogfname, newTLI, startLogSegNo, wal_segment_size);
            errno = save_errno;
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not close file \"%s\": %m", xlogfname)));
        }
    }

    /*
     * Let's just make real sure there are not .ready or .done flags posted
     * for the new segment.
     */
    XLogFileName(xlogfname, newTLI, startLogSegNo, wal_segment_size);
    XLogArchiveCleanup(xlogfname);
}

/*
 * Perform cleanup actions at the conclusion of archive recovery.
 */
static void
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index e7e37146f69..e6fcfc23b93 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -102,45 +102,44 @@ compute_return_type(TypeName *returnType, Oid languageOid,
    if (typtup)
    {
        if (!((Form_pg_type) GETSTRUCT(typtup))->typisdefined)
        {
            if (languageOid == SQLlanguageId)
                ereport(ERROR,
                        (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                         errmsg("SQL function cannot return shell type %s",
                                TypeNameToString(returnType))));
            else
                ereport(NOTICE,
                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                         errmsg("return type %s is only a shell",
                                TypeNameToString(returnType))));
        }
        rettype = typeTypeId(typtup);
        ReleaseSysCache(typtup);
    }
    else
    {
        char       *typnam = TypeNameToString(returnType);
        Oid         namespaceId;
-       AclResult   aclresult;
        char       *typname;
        ObjectAddress address;

        /*
         * Only C-coded functions can be I/O functions.  We enforce this
         * restriction here mainly to prevent littering the catalogs with
         * shell types due to simple typos in user-defined function
         * definitions.
         */
        if (languageOid != INTERNALlanguageId &&
            languageOid != ClanguageId)
            ereport(ERROR,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("type \"%s\" does not exist", typnam)));

        /* Reject if there's typmod decoration, too */
        if (returnType->typmods != NIL)
            ereport(ERROR,
                    (errcode(ERRCODE_SYNTAX_ERROR),
                     errmsg("type modifier cannot be specified for shell type \"%s\"",
                            typnam)));
@@ -1093,46 +1092,44 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt)
            language = "sql";
        else
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                     errmsg("no language specified")));
    }

    /* Look up the language and validate permissions */
    languageTuple = SearchSysCache1(LANGNAME, PointerGetDatum(language));
    if (!HeapTupleIsValid(languageTuple))
        ereport(ERROR,
                (errcode(ERRCODE_UNDEFINED_OBJECT),
                 errmsg("language \"%s\" does not exist", language),
                 (extension_file_exists(language) ?
                  errhint("Use CREATE EXTENSION to load the language into the database.") : 0)));

    languageStruct = (Form_pg_language) GETSTRUCT(languageTuple);
    languageOid = languageStruct->oid;

    if (languageStruct->lanpltrusted)
    {
        /* if trusted language, need USAGE privilege */
-       AclResult   aclresult;
-
        aclresult = pg_language_aclcheck(languageOid, GetUserId(), ACL_USAGE);
        if (aclresult != ACLCHECK_OK)
            aclcheck_error(aclresult, OBJECT_LANGUAGE,
                           NameStr(languageStruct->lanname));
    }
    else
    {
        /* if untrusted language, must be superuser */
        if (!superuser())
            aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_LANGUAGE,
                           NameStr(languageStruct->lanname));
    }

    languageValidator = languageStruct->lanvalidator;

    ReleaseSysCache(languageTuple);

    /*
     * Only superuser is allowed to create leakproof functions because
     * leakproof functions can see tuples which have not yet been filtered out
     * by security barrier views or row-level security policies.
     */
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 29bc26669b0..a250a33f8cb 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -2465,45 +2465,44 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
     * there be only one query.
     */
    if (options->must_return_tuples && plan->plancache_list == NIL)
        ereport(ERROR,
                (errcode(ERRCODE_SYNTAX_ERROR),
                 errmsg("empty query does not return tuples")));

    foreach(lc1, plan->plancache_list)
    {
        CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc1);
        List       *stmt_list;
        ListCell   *lc2;

        spicallbackarg.query = plansource->query_string;

        /*
         * If this is a one-shot plan, we still need to do parse analysis.
         */
        if (plan->oneshot)
        {
            RawStmt    *parsetree = plansource->raw_parse_tree;
            const char *src = plansource->query_string;
-           List       *stmt_list;

            /*
             * Parameter datatypes are driven by parserSetup hook if provided,
             * otherwise we use the fixed parameter list.
             */
            if (parsetree == NULL)
                stmt_list = NIL;
            else if (plan->parserSetup != NULL)
            {
                Assert(plan->nargs == 0);
                stmt_list = pg_analyze_and_rewrite_withcb(parsetree,
                                                          src,
                                                          plan->parserSetup,
                                                          plan->parserSetupArg,
                                                          _SPI_current->queryEnv);
            }
            else
            {
                stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree,
                                                               src,
                                                               plan->argtypes,
                                                               plan->nargs,
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 7d176e7b00a..0557e945ca7 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -2169,45 +2169,45 @@ match_clause_to_index(PlannerInfo *root,
     * but what if someone builds an expression index on a constant?  It's not
     * totally unreasonable to do so with a partial index, either.)
     */
    if (rinfo->pseudoconstant)
        return;

    /*
     * If clause can't be used as an indexqual because it must wait till after
     * some lower-security-level restriction clause, reject it.
     */
    if (!restriction_is_securely_promotable(rinfo, index->rel))
        return;

    /* OK, check each index key column for a match */
    for (indexcol = 0; indexcol < index->nkeycolumns; indexcol++)
    {
        IndexClause *iclause;
        ListCell   *lc;

        /* Ignore duplicates */
        foreach(lc, clauseset->indexclauses[indexcol])
        {
-           IndexClause *iclause = (IndexClause *) lfirst(lc);
+           iclause = (IndexClause *) lfirst(lc);

            if (iclause->rinfo == rinfo)
                return;
        }

        /* OK, try to match the clause to the index column */
        iclause = match_clause_to_indexcol(root,
                                           rinfo,
                                           indexcol,
                                           index);
        if (iclause)
        {
            /* Success, so record it */
            clauseset->indexclauses[indexcol] =
                lappend(clauseset->indexclauses[indexcol], iclause);
            clauseset->nonempty = true;
            return;
        }
    }
}

/*
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index df4ca129191..b15ecc83971 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -2383,45 +2383,45 @@ finalize_plan(PlannerInfo *root, Plan *plan,
                /* We must run finalize_plan on the subquery */
                rel = find_base_rel(root, sscan->scan.scanrelid);
                subquery_params = rel->subroot->outer_params;
                if (gather_param >= 0)
                    subquery_params = bms_add_member(bms_copy(subquery_params),
                                                     gather_param);
                finalize_plan(rel->subroot, sscan->subplan, gather_param,
                              subquery_params, NULL);

                /* Now we can add its extParams to the parent's params */
                context.paramids = bms_add_members(context.paramids,
                                                   sscan->subplan->extParam);

                /* We need scan_params too, though */
                context.paramids = bms_add_members(context.paramids, scan_params);
            }
            break;

        case T_FunctionScan:
            {
                FunctionScan *fscan = (FunctionScan *) plan;
-               ListCell   *lc;
+               ListCell   *lc;

                /*
                 * Call finalize_primnode independently on each function
                 * expression, so that we can record which params are
                 * referenced in each, in order to decide which need
                 * re-evaluating during rescan.
                 */
                foreach(lc, fscan->functions)
                {
                    RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
                    finalize_primnode_context funccontext;

                    funccontext = context;
                    funccontext.paramids = NULL;

                    finalize_primnode(rtfunc->funcexpr, &funccontext);

                    /* remember results for execution */
                    rtfunc->funcparams = funccontext.paramids;

                    /* add the function's params to the overall set */
                    context.paramids = bms_add_members(context.paramids,
@@ -2491,158 +2491,148 @@ finalize_plan(PlannerInfo *root, Plan *plan,
        case T_NamedTuplestoreScan:
            context.paramids = bms_add_members(context.paramids, scan_params);
            break;

        case T_ForeignScan:
            {
                ForeignScan *fscan = (ForeignScan *) plan;

                finalize_primnode((Node *) fscan->fdw_exprs, &context);
                finalize_primnode((Node *) fscan->fdw_recheck_quals, &context);

                /* We assume fdw_scan_tlist cannot contain Params */
                context.paramids = bms_add_members(context.paramids, scan_params);
            }
            break;

        case T_CustomScan:
            {
                CustomScan *cscan = (CustomScan *) plan;
-               ListCell   *lc;
+               ListCell   *lc;

                finalize_primnode((Node *) cscan->custom_exprs, &context);
                /* We assume custom_scan_tlist cannot contain Params */
                context.paramids = bms_add_members(context.paramids, scan_params);

                /* child nodes if any */
                foreach(lc, cscan->custom_plans)
                {
                    context.paramids =
                        bms_add_members(context.paramids,
                                        finalize_plan(root, (Plan *) lfirst(lc),
                                                      gather_param,
                                                      valid_params,
                                                      scan_params));
                }
            }
            break;

        case T_ModifyTable:
            {
                ModifyTable *mtplan = (ModifyTable *) plan;

                /* Force descendant scan nodes to reference epqParam */
                locally_added_param = mtplan->epqParam;
                valid_params = bms_add_member(bms_copy(valid_params),
                                              locally_added_param);
                scan_params = bms_add_member(bms_copy(scan_params),
                                             locally_added_param);
                finalize_primnode((Node *) mtplan->returningLists, &context);
                finalize_primnode((Node *) mtplan->onConflictSet, &context);
                finalize_primnode((Node *) mtplan->onConflictWhere, &context);
                /* exclRelTlist contains only Vars, doesn't need examination */
            }
            break;

        case T_Append:
            {
-               ListCell   *l;
-
                foreach(l, ((Append *) plan)->appendplans)
                {
                    context.paramids =
                        bms_add_members(context.paramids,
                                        finalize_plan(root, (Plan *) lfirst(l),
                                                      gather_param,
                                                      valid_params,
                                                      scan_params));
                }
            }
            break;

        case T_MergeAppend:
            {
-               ListCell   *l;
-
                foreach(l, ((MergeAppend *) plan)->mergeplans)
                {
                    context.paramids =
                        bms_add_members(context.paramids,
                                        finalize_plan(root, (Plan *) lfirst(l),
                                                      gather_param,
                                                      valid_params,
                                                      scan_params));
                }
            }
            break;

        case T_BitmapAnd:
            {
-               ListCell   *l;
-
                foreach(l, ((BitmapAnd *) plan)->bitmapplans)
                {
                    context.paramids =
                        bms_add_members(context.paramids,
                                        finalize_plan(root, (Plan *) lfirst(l),
                                                      gather_param,
                                                      valid_params,
                                                      scan_params));
                }
            }
            break;

        case T_BitmapOr:
            {
-               ListCell   *l;
-
                foreach(l, ((BitmapOr *) plan)->bitmapplans)
                {
                    context.paramids =
                        bms_add_members(context.paramids,
                                        finalize_plan(root, (Plan *) lfirst(l),
                                                      gather_param,
                                                      valid_params,
                                                      scan_params));
                }
            }
            break;

        case T_NestLoop:
            {
-               ListCell   *l;
-
                finalize_primnode((Node *) ((Join *) plan)->joinqual,
                                  &context);
                /* collect set of params that will be passed to right child */
                foreach(l, ((NestLoop *) plan)->nestParams)
                {
                    NestLoopParam *nlp = (NestLoopParam *) lfirst(l);

                    nestloop_params = bms_add_member(nestloop_params,
                                                     nlp->paramno);
                }
            }
            break;

        case T_MergeJoin:
            finalize_primnode((Node *) ((Join *) plan)->joinqual,
                              &context);
            finalize_primnode((Node *) ((MergeJoin *) plan)->mergeclauses,
                              &context);
            break;

        case T_HashJoin:
            finalize_primnode((Node *) ((Join *) plan)->joinqual,
diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c
index 091d6e886b6..2720a2508cb 100644
--- a/src/backend/partitioning/partbounds.c
+++ b/src/backend/partitioning/partbounds.c
@@ -4300,46 +4300,45 @@ get_qual_for_range(Relation parent, PartitionBoundSpec *spec,
    int         i,
                j;
    PartitionRangeDatum *ldatum,
               *udatum;
    PartitionKey key = RelationGetPartitionKey(parent);
    Expr       *keyCol;
    Const      *lower_val,
               *upper_val;
    List       *lower_or_arms,
               *upper_or_arms;
    int         num_or_arms,
                current_or_arm;
    ListCell   *lower_or_start_datum,
               *upper_or_start_datum;
    bool        need_next_lower_arm,
                need_next_upper_arm;

    if (spec->is_default)
    {
        List       *or_expr_args = NIL;
        PartitionDesc pdesc = RelationGetPartitionDesc(parent, false);
        Oid        *inhoids = pdesc->oids;
-       int         nparts = pdesc->nparts,
-                   i;
+       int         nparts = pdesc->nparts;

        for (i = 0; i < nparts; i++)
        {
            Oid         inhrelid = inhoids[i];
            HeapTuple   tuple;
            Datum       datum;
            bool        isnull;
            PartitionBoundSpec *bspec;

            tuple = SearchSysCache1(RELOID, inhrelid);
            if (!HeapTupleIsValid(tuple))
                elog(ERROR, "cache lookup failed for relation %u", inhrelid);

            datum = SysCacheGetAttr(RELOID, tuple,
                                    Anum_pg_class_relpartbound,
                                    &isnull);
            if (isnull)
                elog(ERROR, "null relpartbound for relation %u", inhrelid);

            bspec = (PartitionBoundSpec *)
                stringToNode(TextDatumGetCString(datum));
            if (!IsA(bspec, PartitionBoundSpec))
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 89cf9f9389c..8ac78a6cf38 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -2301,45 +2301,44 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
                     * previous tuple's toast chunks.
                     */
                    Assert(change->data.tp.clear_toast_afterwards);
                    ReorderBufferToastReset(rb, txn);

                    /* We don't need this record anymore. */
                    ReorderBufferReturnChange(rb, specinsert, true);
                    specinsert = NULL;
                }
                break;

            case REORDER_BUFFER_CHANGE_TRUNCATE:
                {
                    int         i;
                    int         nrelids = change->data.truncate.nrelids;
                    int         nrelations = 0;
                    Relation   *relations;

                    relations = palloc0(nrelids * sizeof(Relation));
                    for (i = 0; i < nrelids; i++)
                    {
                        Oid         relid = change->data.truncate.relids[i];
-                       Relation    relation;

                        relation = RelationIdGetRelation(relid);

                        if (!RelationIsValid(relation))
                            elog(ERROR, "could not open relation with OID %u", relid);

                        if (!RelationIsLogicallyLogged(relation))
                            continue;

                        relations[nrelations++] = relation;
                    }

                    /* Apply the truncate. */
                    ReorderBufferApplyTruncate(rb, txn, nrelations,
                                               relations, change,
                                               streaming);

                    for (i = 0; i < nrelations; i++)
                        RelationClose(relations[i]);

                    break;
                }
diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c
index a233dd47585..b2a72374306 100644
--- a/src/backend/rewrite/rowsecurity.c
+++ b/src/backend/rewrite/rowsecurity.c
@@ -805,45 +805,44 @@ add_with_check_options(Relation rel,
        wco->polname = NULL;
        wco->cascaded = false;

        if (list_length(permissive_quals) == 1)
            wco->qual = (Node *) linitial(permissive_quals);
        else
            wco->qual = (Node *) makeBoolExpr(OR_EXPR, permissive_quals, -1);

        ChangeVarNodes(wco->qual, 1, rt_index, 0);

        *withCheckOptions = list_append_unique(*withCheckOptions, wco);

        /*
         * Now add WithCheckOptions for each of the restrictive policy clauses
         * (which will be combined together using AND).  We use a separate
         * WithCheckOption for each restrictive policy to allow the policy
         * name to be included in error reports if the policy is violated.
         */
        foreach(item, restrictive_policies)
        {
            RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item);
            Expr       *qual = QUAL_FOR_WCO(policy);
-           WithCheckOption *wco;

            if (qual != NULL)
            {
                qual = copyObject(qual);
                ChangeVarNodes((Node *) qual, 1, rt_index, 0);

                wco = makeNode(WithCheckOption);
                wco->kind = kind;
                wco->relname = pstrdup(RelationGetRelationName(rel));
                wco->polname = pstrdup(policy->policy_name);
                wco->qual = (Node *) qual;
                wco->cascaded = false;

                *withCheckOptions = list_append_unique(*withCheckOptions, wco);
                *hasSubLinks |= policy->hassublinks;
            }
        }
    }
    else
    {
        /*
         * If there were no policy clauses to check new data, add a single
diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c
index 1190b8000bc..71a6053b6a0 100644
--- a/src/backend/utils/adt/rangetypes_spgist.c
+++ b/src/backend/utils/adt/rangetypes_spgist.c
@@ -674,73 +674,71 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                if (minLower)
                {
                    /*
                     * If the centroid's lower bound is less than or equal to the
                     * minimum lower bound, anything in the 3rd and 4th quadrants
                     * will have an even smaller lower bound, and thus can't
                     * match.
                     */
                    if (range_cmp_bounds(typcache, &centroidLower, minLower) <= 0)
                        which &= (1 << 1) | (1 << 2) | (1 << 5);
                }

                if (maxLower)
                {
                    /*
                     * If the centroid's lower bound is greater than the maximum
                     * lower bound, anything in the 1st and 2nd quadrants will
                     * also have a greater than or equal lower bound, and thus
                     * can't match.  If the centroid's lower bound is equal to the
                     * maximum lower bound, we can still exclude the 1st and 2nd
                     * quadrants if we're looking for a value strictly greater
                     * than the maximum.
                     */
-                   int         cmp;

                    cmp = range_cmp_bounds(typcache, &centroidLower, maxLower);
                    if (cmp > 0 || (!inclusive && cmp == 0))
                        which &= (1 << 3) | (1 << 4) | (1 << 5);
                }

                if (minUpper)
                {
                    /*
                     * If the centroid's upper bound is less than or equal to the
                     * minimum upper bound, anything in the 2nd and 3rd quadrants
                     * will have an even smaller upper bound, and thus can't
                     * match.
                     */
                    if (range_cmp_bounds(typcache, &centroidUpper, minUpper) <= 0)
                        which &= (1 << 1) | (1 << 4) | (1 << 5);
                }

                if (maxUpper)
                {
                    /*
                     * If the centroid's upper bound is greater than the maximum
                     * upper bound, anything in the 1st and 4th quadrants will
                     * also have a greater than or equal upper bound, and thus
                     * can't match.  If the centroid's upper bound is equal to the
                     * maximum upper bound, we can still exclude the 1st and 4th
                     * quadrants if we're looking for a value strictly greater
                     * than the maximum.
                     */
-                   int         cmp;

                    cmp = range_cmp_bounds(typcache, &centroidUpper, maxUpper);
                    if (cmp > 0 || (!inclusive && cmp == 0))
                        which &= (1 << 2) | (1 << 3) | (1 << 5);
                }

                if (which == 0)
                    break;      /* no need to consider remaining conditions */
            }
    }

    /* We must descend into the quadrant(s) identified by 'which' */
    out->nodeNumbers = (int *) palloc(sizeof(int) * in->nNodes);
    if (needPrevious)
        out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes);
    out->nNodes = 0;

    /*
     * Elements of traversalValues should be allocated in
     * traversalMemoryContext
     */
    oldCtx = MemoryContextSwitchTo(in->traversalMemoryContext);
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 8280711f7ef..9959f6910e9 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -1284,45 +1284,44 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
    idxrelrec = (Form_pg_class) GETSTRUCT(ht_idxrel);

    /*
     * Fetch the pg_am tuple of the index' access method
     */
    ht_am = SearchSysCache1(AMOID, ObjectIdGetDatum(idxrelrec->relam));
    if (!HeapTupleIsValid(ht_am))
        elog(ERROR, "cache lookup failed for access method %u",
             idxrelrec->relam);
    amrec = (Form_pg_am) GETSTRUCT(ht_am);

    /* Fetch the index AM's API struct */
    amroutine = GetIndexAmRoutine(amrec->amhandler);

    /*
     * Get the index expressions, if any.  (NOTE: we do not use the relcache
     * versions of the expressions and predicate, because we want to display
     * non-const-folded expressions.)
     */
    if (!heap_attisnull(ht_idx, Anum_pg_index_indexprs, NULL))
    {
        Datum       exprsDatum;
-       bool        isnull;
        char       *exprsString;

        exprsDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
                                     Anum_pg_index_indexprs, &isnull);
        Assert(!isnull);
        exprsString = TextDatumGetCString(exprsDatum);
        indexprs = (List *) stringToNode(exprsString);
        pfree(exprsString);
    }
    else
        indexprs = NIL;

    indexpr_item = list_head(indexprs);

    context = deparse_context_for(get_relation_name(indrelid), indrelid);

    /*
     * Start the index definition.  Note that the index's name should never be
     * schema-qualified, but the indexed rel's name may be.
     */
    initStringInfo(&buf);
@@ -1481,45 +1480,44 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
         */
        if (showTblSpc)
        {
            Oid         tblspc;

            tblspc = get_rel_tablespace(indexrelid);
            if (OidIsValid(tblspc))
            {
                if (isConstraint)
                    appendStringInfoString(&buf, " USING INDEX");
                appendStringInfo(&buf, " TABLESPACE %s",
                                 quote_identifier(get_tablespace_name(tblspc)));
            }
        }

        /*
         * If it's a partial index, decompile and append the predicate
         */
        if (!heap_attisnull(ht_idx, Anum_pg_index_indpred, NULL))
        {
            Node       *node;
            Datum       predDatum;
-           bool        isnull;
            char       *predString;

            /* Convert text string to node tree */
            predDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
                                        Anum_pg_index_indpred, &isnull);
            Assert(!isnull);
            predString = TextDatumGetCString(predDatum);
            node = (Node *) stringToNode(predString);
            pfree(predString);

            /* Deparse */
            str = deparse_expression_pretty(node, context, false, false,
                                            prettyFlags, 0);
            if (isConstraint)
                appendStringInfo(&buf, " WHERE (%s)", str);
            else
                appendStringInfo(&buf, " WHERE %s", str);
        }
    }

    /* Clean up */
    ReleaseSysCache(ht_idx);
@@ -1926,45 +1924,44 @@ pg_get_partkeydef_worker(Oid relid, int prettyFlags,
    Assert(form->partrelid == relid);

    /* Must get partclass and partcollation the hard way */
    datum = SysCacheGetAttr(PARTRELID, tuple,
                            Anum_pg_partitioned_table_partclass, &isnull);
    Assert(!isnull);
    partclass = (oidvector *) DatumGetPointer(datum);

    datum = SysCacheGetAttr(PARTRELID, tuple,
                            Anum_pg_partitioned_table_partcollation, &isnull);
    Assert(!isnull);
    partcollation = (oidvector *) DatumGetPointer(datum);

    /*
     * Get the expressions, if any.  (NOTE: we do not use the relcache
     * versions of the expressions, because we want to display
     * non-const-folded expressions.)
     */
    if (!heap_attisnull(tuple, Anum_pg_partitioned_table_partexprs, NULL))
    {
        Datum       exprsDatum;
-       bool        isnull;
        char       *exprsString;

        exprsDatum = SysCacheGetAttr(PARTRELID, tuple,
                                     Anum_pg_partitioned_table_partexprs, &isnull);
        Assert(!isnull);
        exprsString = TextDatumGetCString(exprsDatum);
        partexprs = (List *) stringToNode(exprsString);

        if (!IsA(partexprs, List))
            elog(ERROR, "unexpected node type found in partexprs: %d",
                 (int) nodeTag(partexprs));

        pfree(exprsString);
    }
    else
        partexprs = NIL;

    partexpr_item = list_head(partexprs);

    context = deparse_context_for(get_relation_name(relid), relid);

    initStringInfo(&buf);