diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 87b243e0d4b..a090cada400 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -3017,46 +3017,45 @@ XLogFileInitInternal(XLogSegNo logsegno, TimeLineID logtli,
 		}
 		pgstat_report_wait_end();
 
 		if (save_errno)
 		{
 			/*
 			 * If we fail to make the file, delete it to release disk space
 			 */
 			unlink(tmppath);
 
 			close(fd);
 
 			errno = save_errno;
 
 			ereport(ERROR,
 					(errcode_for_file_access(),
 					 errmsg("could not write to file \"%s\": %m", tmppath)));
 		}
 
 	pgstat_report_wait_start(WAIT_EVENT_WAL_INIT_SYNC);
 	if (pg_fsync(fd) != 0)
 	{
-		int			save_errno = errno;
-
+		save_errno = errno;
 		close(fd);
 		errno = save_errno;
 		ereport(ERROR,
 				(errcode_for_file_access(),
 				 errmsg("could not fsync file \"%s\": %m", tmppath)));
 	}
 	pgstat_report_wait_end();
 
 	if (close(fd) != 0)
 		ereport(ERROR,
 				(errcode_for_file_access(),
 				 errmsg("could not close file \"%s\": %m", tmppath)));
 
 	/*
 	 * Now move the segment into place with its final name. Cope with
 	 * possibility that someone else has created the file while we were
 	 * filling ours: if so, use ours to pre-create a future log segment.
 	 */
 	installed_segno = logsegno;
 
 	/*
 	 * XXX: What should we use as max_segno? We used to use XLOGfileslop when
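The xlog.c hunk above stops declaring a second, block-scoped save_errno in the pg_fsync() error path and reuses the one already in scope earlier in XLogFileInitInternal(). The variable exists because close() can fail and overwrite errno before ereport() expands %m. A minimal stand-alone sketch of that idiom in plain C, not part of the patch (the helper name and message are invented):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Illustrative only: report an fsync failure without letting close() clobber errno. */
    static void
    report_fsync_failure(int fd, const char *path)
    {
        int     save_errno = errno; /* capture fsync()'s errno first */

        close(fd);                  /* may set errno again on failure */
        errno = save_errno;         /* restore before formatting the message */
        fprintf(stderr, "could not fsync file \"%s\": %s\n", path, strerror(errno));
    }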
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 9be04c8a1e7..dacc989d855 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -16777,45 +16777,44 @@ PreCommit_on_commit_actions(void)
 				oids_to_truncate = lappend_oid(oids_to_truncate, oc->relid);
 				break;
 			case ONCOMMIT_DROP:
 				oids_to_drop = lappend_oid(oids_to_drop, oc->relid);
 				break;
 		}
 	}
 
 	/*
 	 * Truncate relations before dropping so that all dependencies between
 	 * relations are removed after they are worked on. Doing it like this
 	 * might be a waste as it is possible that a relation being truncated will
 	 * be dropped anyway due to its parent being dropped, but this makes the
 	 * code more robust because of not having to re-check that the relation
 	 * exists at truncation time.
 	 */
 	if (oids_to_truncate != NIL)
 		heap_truncate(oids_to_truncate);
 
 	if (oids_to_drop != NIL)
 	{
 		ObjectAddresses *targetObjects = new_object_addresses();
-		ListCell   *l;
 
 		foreach(l, oids_to_drop)
 		{
 			ObjectAddress object;
 
 			object.classId = RelationRelationId;
 			object.objectId = lfirst_oid(l);
 			object.objectSubId = 0;
 
 			Assert(!object_address_present(&object, targetObjects));
 
 			add_exact_object_address(&object, targetObjects);
 		}
 
 		/*
 		 * Since this is an automatic drop, rather than one directly initiated
 		 * by the user, we pass the PERFORM_DELETION_INTERNAL flag.
 		 */
 		performMultipleDeletions(targetObjects, DROP_CASCADE,
 								 PERFORM_DELETION_INTERNAL | PERFORM_DELETION_QUIETLY);
 
 #ifdef USE_ASSERT_CHECKING
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index dbdfe8bd2d4..3670d1f1861 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -214,46 +214,44 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
 		(skip_locked ? VACOPT_SKIP_LOCKED : 0) |
 		(analyze ? VACOPT_ANALYZE : 0) |
 		(freeze ? VACOPT_FREEZE : 0) |
 		(full ? VACOPT_FULL : 0) |
 		(disable_page_skipping ? VACOPT_DISABLE_PAGE_SKIPPING : 0) |
 		(process_toast ? VACOPT_PROCESS_TOAST : 0);
 
 	/* sanity checks on options */
 	Assert(params.options & (VACOPT_VACUUM | VACOPT_ANALYZE));
 	Assert((params.options & VACOPT_VACUUM) ||
 		   !(params.options & (VACOPT_FULL | VACOPT_FREEZE)));
 
 	if ((params.options & VACOPT_FULL) && params.nworkers > 0)
 		ereport(ERROR,
 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 				 errmsg("VACUUM FULL cannot be performed in parallel")));
 
 	/*
 	 * Make sure VACOPT_ANALYZE is specified if any column lists are present.
 	 */
 	if (!(params.options & VACOPT_ANALYZE))
 	{
-		ListCell   *lc;
-
 		foreach(lc, vacstmt->rels)
 		{
 			VacuumRelation *vrel = lfirst_node(VacuumRelation, lc);
 
 			if (vrel->va_cols != NIL)
 				ereport(ERROR,
 						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 						 errmsg("ANALYZE option must be specified when a column list is provided")));
 		}
 	}
 
 	/*
 	 * All freeze ages are zero if the FREEZE option is given; otherwise pass
 	 * them as -1 which means to use the default values.
 	 */
 	if (params.options & VACOPT_FREEZE)
 	{
 		params.freeze_min_age = 0;
 		params.freeze_table_age = 0;
 		params.multixact_freeze_min_age = 0;
 		params.multixact_freeze_table_age = 0;
 	}
diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c
index ac03271882f..901dd435efd 100644
--- a/src/backend/executor/execPartition.c
+++ b/src/backend/executor/execPartition.c
@@ -749,45 +749,44 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
 			 */
 			if (map == NULL)
 			{
 				/*
 				 * It's safe to reuse these from the partition root, as we
 				 * only process one tuple at a time (therefore we won't
 				 * overwrite needed data in slots), and the results of
 				 * projections are independent of the underlying storage.
 				 * Projections and where clauses themselves don't store state
 				 * / are independent of the underlying storage.
 				 */
 				onconfl->oc_ProjSlot =
 					rootResultRelInfo->ri_onConflict->oc_ProjSlot;
 				onconfl->oc_ProjInfo =
 					rootResultRelInfo->ri_onConflict->oc_ProjInfo;
 				onconfl->oc_WhereClause =
 					rootResultRelInfo->ri_onConflict->oc_WhereClause;
 			}
 			else
 			{
 				List	   *onconflset;
 				List	   *onconflcols;
-				bool		found_whole_row;
 
 				/*
 				 * Translate expressions in onConflictSet to account for
 				 * different attribute numbers. For that, map partition
 				 * varattnos twice: first to catch the EXCLUDED
 				 * pseudo-relation (INNER_VAR), and second to handle the main
 				 * target relation (firstVarno).
 				 */
 				onconflset = copyObject(node->onConflictSet);
 				if (part_attmap == NULL)
 					part_attmap =
 						build_attrmap_by_name(RelationGetDescr(partrel),
 											  RelationGetDescr(firstResultRel));
 				onconflset = (List *)
 					map_variable_attnos((Node *) onconflset,
 										INNER_VAR, 0,
 										part_attmap,
 										RelationGetForm(partrel)->reltype,
 										&found_whole_row);
 				/* We ignore the value of found_whole_row. */
 				onconflset = (List *)
 					map_variable_attnos((Node *) onconflset,
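The tablecmds.c and vacuum.c hunks above drop block-local ListCell declarations, leaving the loops to use a cell declared further up in the same function. A small sketch of why one declaration is enough (invented helper, standard backend list API): every foreach() reinitializes the cell it is handed, so the same variable can drive any number of loops.

    #include "postgres.h"
    #include "nodes/pg_list.h"

    /* Sketch only: one ListCell at function scope serving two foreach() loops. */
    static int
    count_valid_oids(List *oids)
    {
        ListCell   *lc;
        int         nvalid = 0;
        int         ninvalid = 0;

        /* first pass: count valid OIDs */
        foreach(lc, oids)
        {
            if (OidIsValid(lfirst_oid(lc)))
                nvalid++;
        }

        /* second pass reuses the same cell; foreach() reinitializes it */
        foreach(lc, oids)
        {
            if (!OidIsValid(lfirst_oid(lc)))
                ninvalid++;
        }

        Assert(nvalid + ninvalid == list_length(oids));
        return nvalid;
    }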
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 7d176e7b00a..8ba27a98b42 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -342,45 +342,44 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
 		bpath = create_bitmap_heap_path(root, rel, bitmapqual,
 										rel->lateral_relids, 1.0, 0);
 		add_path(rel, (Path *) bpath);
 
 		/* create a partial bitmap heap path */
 		if (rel->consider_parallel && rel->lateral_relids == NULL)
 			create_partial_bitmap_paths(root, rel, bitmapqual);
 	}
 
 	/*
 	 * Likewise, if we found anything usable, generate BitmapHeapPaths for the
 	 * most promising combinations of join bitmap index paths. Our strategy
 	 * is to generate one such path for each distinct parameterization seen
 	 * among the available bitmap index paths. This may look pretty
 	 * expensive, but usually there won't be very many distinct
 	 * parameterizations. (This logic is quite similar to that in
 	 * consider_index_join_clauses, but we're working with whole paths not
 	 * individual clauses.)
 	 */
 	if (bitjoinpaths != NIL)
 	{
 		List	   *all_path_outers;
-		ListCell   *lc;
 
 		/* Identify each distinct parameterization seen in bitjoinpaths */
 		all_path_outers = NIL;
 		foreach(lc, bitjoinpaths)
 		{
 			Path	   *path = (Path *) lfirst(lc);
 			Relids		required_outer = PATH_REQ_OUTER(path);
 
 			if (!bms_equal_any(required_outer, all_path_outers))
 				all_path_outers = lappend(all_path_outers, required_outer);
 		}
 
 		/* Now, for each distinct parameterization set ... */
 		foreach(lc, all_path_outers)
 		{
 			Relids		max_outers = (Relids) lfirst(lc);
 			List	   *this_path_set;
 			Path	   *bitmapqual;
 			Relids		required_outer;
 			double		loop_count;
 			BitmapHeapPath *bpath;
 			ListCell   *lcp;
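The first loop in the indxpath.c hunk collects each distinct parameterization exactly once. A stand-alone sketch of that deduplication step (the helper and its argument list are invented; bms_equal_any() and lappend() are the primitives the hunk itself uses, with the input sets taken from PATH_REQ_OUTER() there):

    #include "postgres.h"
    #include "nodes/pathnodes.h"
    #include "nodes/pg_list.h"

    /*
     * Sketch only: keep one representative of each distinct parameterization.
     * The input is a List of Relids rather than a list of paths.
     */
    static List *
    collect_distinct_outers(List *required_outer_sets)
    {
        List       *all_path_outers = NIL;
        ListCell   *lc;

        foreach(lc, required_outer_sets)
        {
            Relids      required_outer = (Relids) lfirst(lc);

            /* bms_equal_any() compares the set against every member of the list */
            if (!bms_equal_any(required_outer, all_path_outers))
                all_path_outers = lappend(all_path_outers, required_outer);
        }

        return all_path_outers;
    }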
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index df4ca129191..b15ecc83971 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -2383,45 +2383,45 @@ finalize_plan(PlannerInfo *root, Plan *plan,
 				/* We must run finalize_plan on the subquery */
 				rel = find_base_rel(root, sscan->scan.scanrelid);
 				subquery_params = rel->subroot->outer_params;
 				if (gather_param >= 0)
 					subquery_params = bms_add_member(bms_copy(subquery_params),
 													 gather_param);
 
 				finalize_plan(rel->subroot, sscan->subplan, gather_param,
 							  subquery_params, NULL);
 
 				/* Now we can add its extParams to the parent's params */
 				context.paramids = bms_add_members(context.paramids,
 												   sscan->subplan->extParam);
 
 				/* We need scan_params too, though */
 				context.paramids = bms_add_members(context.paramids,
 												   scan_params);
 			}
 			break;
 
 		case T_FunctionScan:
 			{
 				FunctionScan *fscan = (FunctionScan *) plan;
-				ListCell   *lc;
+				ListCell   *lc;
 
 				/*
 				 * Call finalize_primnode independently on each function
 				 * expression, so that we can record which params are
 				 * referenced in each, in order to decide which need
 				 * re-evaluating during rescan.
 				 */
 				foreach(lc, fscan->functions)
 				{
 					RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
 					finalize_primnode_context funccontext;
 
 					funccontext = context;
 					funccontext.paramids = NULL;
 
 					finalize_primnode(rtfunc->funcexpr, &funccontext);
 
 					/* remember results for execution */
 					rtfunc->funcparams = funccontext.paramids;
 
 					/* add the function's params to the overall set */
 					context.paramids = bms_add_members(context.paramids,
@@ -2491,158 +2491,148 @@ finalize_plan(PlannerInfo *root, Plan *plan,
 		case T_NamedTuplestoreScan:
 			context.paramids = bms_add_members(context.paramids, scan_params);
 			break;
 
 		case T_ForeignScan:
 			{
 				ForeignScan *fscan = (ForeignScan *) plan;
 
 				finalize_primnode((Node *) fscan->fdw_exprs,
 								  &context);
 				finalize_primnode((Node *) fscan->fdw_recheck_quals,
 								  &context);
 
 				/* We assume fdw_scan_tlist cannot contain Params */
 				context.paramids = bms_add_members(context.paramids,
 												   scan_params);
 			}
 			break;
 
 		case T_CustomScan:
 			{
 				CustomScan *cscan = (CustomScan *) plan;
-				ListCell   *lc;
+				ListCell   *lc;
 
 				finalize_primnode((Node *) cscan->custom_exprs,
 								  &context);
 
 				/* We assume custom_scan_tlist cannot contain Params */
 				context.paramids = bms_add_members(context.paramids,
 												   scan_params);
 
 				/* child nodes if any */
 				foreach(lc, cscan->custom_plans)
 				{
 					context.paramids =
 						bms_add_members(context.paramids,
 										finalize_plan(root, (Plan *) lfirst(lc),
 													  gather_param, valid_params,
 													  scan_params));
 				}
 			}
 			break;
 
 		case T_ModifyTable:
 			{
 				ModifyTable *mtplan = (ModifyTable *) plan;
 
 				/* Force descendant scan nodes to reference epqParam */
 				locally_added_param = mtplan->epqParam;
 				valid_params = bms_add_member(bms_copy(valid_params),
 											  locally_added_param);
 				scan_params = bms_add_member(bms_copy(scan_params),
 											 locally_added_param);
 				finalize_primnode((Node *) mtplan->returningLists,
 								  &context);
 				finalize_primnode((Node *) mtplan->onConflictSet,
 								  &context);
 				finalize_primnode((Node *) mtplan->onConflictWhere,
 								  &context);
 				/* exclRelTlist contains only Vars, doesn't need examination */
 			}
 			break;
 
 		case T_Append:
 			{
-				ListCell   *l;
-
 				foreach(l, ((Append *) plan)->appendplans)
 				{
 					context.paramids =
 						bms_add_members(context.paramids,
 										finalize_plan(root, (Plan *) lfirst(l),
 													  gather_param, valid_params,
 													  scan_params));
 				}
 			}
 			break;
 
 		case T_MergeAppend:
 			{
-				ListCell   *l;
-
 				foreach(l, ((MergeAppend *) plan)->mergeplans)
 				{
 					context.paramids =
 						bms_add_members(context.paramids,
 										finalize_plan(root, (Plan *) lfirst(l),
 													  gather_param, valid_params,
 													  scan_params));
 				}
 			}
 			break;
 
 		case T_BitmapAnd:
 			{
-				ListCell   *l;
-
 				foreach(l, ((BitmapAnd *) plan)->bitmapplans)
 				{
 					context.paramids =
 						bms_add_members(context.paramids,
 										finalize_plan(root, (Plan *) lfirst(l),
 													  gather_param, valid_params,
 													  scan_params));
 				}
 			}
 			break;
 
 		case T_BitmapOr:
 			{
-				ListCell   *l;
-
 				foreach(l, ((BitmapOr *) plan)->bitmapplans)
 				{
 					context.paramids =
 						bms_add_members(context.paramids,
 										finalize_plan(root, (Plan *) lfirst(l),
 													  gather_param, valid_params,
 													  scan_params));
 				}
 			}
 			break;
 
 		case T_NestLoop:
 			{
-				ListCell   *l;
-
 				finalize_primnode((Node *) ((Join *) plan)->joinqual,
 								  &context);
 				/* collect set of params that will be passed to right child */
 				foreach(l, ((NestLoop *) plan)->nestParams)
 				{
 					NestLoopParam *nlp = (NestLoopParam *) lfirst(l);
 
 					nestloop_params = bms_add_member(nestloop_params,
 													 nlp->paramno);
 				}
 			}
 			break;
 
 		case T_MergeJoin:
 			finalize_primnode((Node *) ((Join *) plan)->joinqual,
 							  &context);
 			finalize_primnode((Node *) ((MergeJoin *) plan)->mergeclauses,
 							  &context);
 			break;
 
 		case T_HashJoin:
 			finalize_primnode((Node *) ((Join *) plan)->joinqual,
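Several cases in the finalize_plan() hunk above (Append, MergeAppend, BitmapAnd, BitmapOr, CustomScan) repeat the same fold: add every child's param IDs into the parent's set. A sketch of that accumulation in isolation (invented helper; the children's sets are passed in directly instead of coming from the recursive finalize_plan() call):

    #include "postgres.h"
    #include "nodes/bitmapset.h"
    #include "nodes/pg_list.h"

    /* Sketch only: fold each child's param-ID set into the parent's set. */
    static Bitmapset *
    add_child_params(Bitmapset *paramids, List *child_param_sets)
    {
        ListCell   *l;

        foreach(l, child_param_sets)
            paramids = bms_add_members(paramids, (Bitmapset *) lfirst(l));

        return paramids;
    }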
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 043181b586b..71052c841d7 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -634,45 +634,44 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root,
 	 * For UNION ALL, we just need the Append path. For UNION, need to add
 	 * node(s) to remove duplicates.
 	 */
 	if (!op->all)
 		path = make_union_unique(op, path, tlist, root);
 
 	add_path(result_rel, path);
 
 	/*
 	 * Estimate number of groups. For now we just assume the output is unique
 	 * --- this is certainly true for the UNION case, and we want worst-case
 	 * estimates anyway.
 	 */
 	result_rel->rows = path->rows;
 
 	/*
 	 * Now consider doing the same thing using the partial paths plus Append
 	 * plus Gather.
 	 */
 	if (partial_paths_valid)
 	{
 		Path	   *ppath;
-		ListCell   *lc;
 		int			parallel_workers = 0;
 
 		/* Find the highest number of workers requested for any subpath. */
 		foreach(lc, partial_pathlist)
 		{
 			Path	   *path = lfirst(lc);
 
 			parallel_workers = Max(parallel_workers, path->parallel_workers);
 		}
 		Assert(parallel_workers > 0);
 
 		/*
 		 * If the use of parallel append is permitted, always request at least
 		 * log2(# of children) paths. We assume it can be useful to have
 		 * extra workers in this case because they will be spread out across
 		 * the children. The precise formula is just a guess; see
 		 * add_paths_to_append_rel.
 		 */
 		if (enable_parallel_append)
 		{
 			parallel_workers = Max(parallel_workers,
 								   pg_leftmost_one_pos32(list_length(partial_pathlist)) + 1);
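The parallel-append branch above requests at least log2(number of children) + 1 workers. pg_leftmost_one_pos32(n) is floor(log2(n)), so, for example, 8 partial subpaths ask for at least 4 workers and 5 subpaths for at least 3, unless some subpath already requested more. A sketch of just that arithmetic (invented helper name):

    #include "postgres.h"
    #include "port/pg_bitutils.h"

    /* Sketch only: the "at least log2(# of children)" rule from the hunk above. */
    static int
    min_workers_for_parallel_append(int nchildren, int requested)
    {
        Assert(nchildren > 0);  /* pg_leftmost_one_pos32() requires a nonzero input */
        return Max(requested, pg_leftmost_one_pos32(nchildren) + 1);
    }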
diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c
index c1c27e67d47..bf698c1fc3f 100644
--- a/src/backend/statistics/dependencies.c
+++ b/src/backend/statistics/dependencies.c
@@ -1246,45 +1246,44 @@ dependency_is_compatible_expression(Node *clause, Index relid, List *statlist,
 		 * first argument, and pseudoconstant is the second one.
 		 */
 		if (!is_pseudo_constant_clause(lsecond(expr->args)))
 			return false;
 
 		clause_expr = linitial(expr->args);
 
 		/*
 		 * If it's not an "=" operator, just ignore the clause, as it's not
 		 * compatible with functional dependencies. The operator is identified
 		 * simply by looking at which function it uses to estimate
 		 * selectivity. That's a bit strange, but it's what other similar
 		 * places do.
 		 */
 		if (get_oprrest(expr->opno) != F_EQSEL)
 			return false;
 
 		/* OK to proceed with checking "var" */
 	}
 	else if (is_orclause(clause))
 	{
 		BoolExpr   *bool_expr = (BoolExpr *) clause;
-		ListCell   *lc;
 
 		/* start with no expression (we'll use the first match) */
 		*expr = NULL;
 
 		foreach(lc, bool_expr->args)
 		{
 			Node	   *or_expr = NULL;
 
 			/*
 			 * Had we found incompatible expression in the arguments, treat
 			 * the whole expression as incompatible.
 			 */
 			if (!dependency_is_compatible_expression((Node *) lfirst(lc),
 													 relid, statlist,
 													 &or_expr))
 				return false;
 
 			if (*expr == NULL)
 				*expr = or_expr;
 
 			/* ensure all the expressions are the same */
 			if (!equal(or_expr, *expr))
 				return false;
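As a concrete illustration of the OR-clause handling above: a predicate like (a = 1 OR a = 2) stays compatible, because every arm is an equality on the same expression and each arm's expression compares equal() to the one remembered from the first arm, whereas (a = 1 OR b = 2) is rejected as soon as the second arm's expression fails that comparison.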
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 8964f73b929..3f5683f70b5 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -1284,45 +1284,44 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
 	idxrelrec = (Form_pg_class) GETSTRUCT(ht_idxrel);
 
 	/*
 	 * Fetch the pg_am tuple of the index' access method
 	 */
 	ht_am = SearchSysCache1(AMOID, ObjectIdGetDatum(idxrelrec->relam));
 	if (!HeapTupleIsValid(ht_am))
 		elog(ERROR, "cache lookup failed for access method %u",
 			 idxrelrec->relam);
 	amrec = (Form_pg_am) GETSTRUCT(ht_am);
 
 	/* Fetch the index AM's API struct */
 	amroutine = GetIndexAmRoutine(amrec->amhandler);
 
 	/*
 	 * Get the index expressions, if any. (NOTE: we do not use the relcache
 	 * versions of the expressions and predicate, because we want to display
 	 * non-const-folded expressions.)
 	 */
 	if (!heap_attisnull(ht_idx, Anum_pg_index_indexprs, NULL))
 	{
 		Datum		exprsDatum;
-		bool		isnull;
 		char	   *exprsString;
 
 		exprsDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
 									 Anum_pg_index_indexprs, &isnull);
 		Assert(!isnull);
 		exprsString = TextDatumGetCString(exprsDatum);
 		indexprs = (List *) stringToNode(exprsString);
 		pfree(exprsString);
 	}
 	else
 		indexprs = NIL;
 
 	indexpr_item = list_head(indexprs);
 
 	context = deparse_context_for(get_relation_name(indrelid), indrelid);
 
 	/*
 	 * Start the index definition. Note that the index's name should never be
 	 * schema-qualified, but the indexed rel's name may be.
 	 */
 	initStringInfo(&buf);
@@ -1481,45 +1480,44 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
 		 */
 		if (showTblSpc)
 		{
 			Oid			tblspc;
 
 			tblspc = get_rel_tablespace(indexrelid);
 			if (OidIsValid(tblspc))
 			{
 				if (isConstraint)
 					appendStringInfoString(&buf, " USING INDEX");
 				appendStringInfo(&buf, " TABLESPACE %s",
 								 quote_identifier(get_tablespace_name(tblspc)));
 			}
 		}
 
 		/*
 		 * If it's a partial index, decompile and append the predicate
 		 */
 		if (!heap_attisnull(ht_idx, Anum_pg_index_indpred, NULL))
 		{
 			Node	   *node;
 			Datum		predDatum;
-			bool		isnull;
 			char	   *predString;
 
 			/* Convert text string to node tree */
 			predDatum = SysCacheGetAttr(INDEXRELID, ht_idx,
 										Anum_pg_index_indpred, &isnull);
 			Assert(!isnull);
 			predString = TextDatumGetCString(predDatum);
 			node = (Node *) stringToNode(predString);
 			pfree(predString);
 
 			/* Deparse */
 			str = deparse_expression_pretty(node, context, false, false,
 											prettyFlags, 0);
 			if (isConstraint)
 				appendStringInfo(&buf, " WHERE (%s)", str);
 			else
 				appendStringInfo(&buf, " WHERE %s", str);
 		}
 	}
 
 	/* Clean up */
 	ReleaseSysCache(ht_idx);
@@ -1629,45 +1627,44 @@ pg_get_statisticsobj_worker(Oid statextid, bool columns_only, bool missing_ok)
 	statexttup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statextid));
 
 	if (!HeapTupleIsValid(statexttup))
 	{
 		if (missing_ok)
 			return NULL;
 		elog(ERROR, "cache lookup failed for statistics object %u", statextid);
 	}
 
 	/* has the statistics expressions? */
 	has_exprs = !heap_attisnull(statexttup, Anum_pg_statistic_ext_stxexprs, NULL);
 
 	statextrec = (Form_pg_statistic_ext) GETSTRUCT(statexttup);
 
 	/*
 	 * Get the statistics expressions, if any. (NOTE: we do not use the
 	 * relcache versions of the expressions, because we want to display
 	 * non-const-folded expressions.)
 	 */
 	if (has_exprs)
 	{
 		Datum		exprsDatum;
-		bool		isnull;
 		char	   *exprsString;
 
 		exprsDatum = SysCacheGetAttr(STATEXTOID, statexttup,
 									 Anum_pg_statistic_ext_stxexprs, &isnull);
 		Assert(!isnull);
 		exprsString = TextDatumGetCString(exprsDatum);
 		exprs = (List *) stringToNode(exprsString);
 		pfree(exprsString);
 	}
 	else
 		exprs = NIL;
 
 	/* count the number of columns (attributes and expressions) */
 	ncolumns = statextrec->stxkeys.dim1 + list_length(exprs);
 
 	initStringInfo(&buf);
 
 	if (!columns_only)
 	{
 		nsp = get_namespace_name_or_temp(statextrec->stxnamespace);
 		appendStringInfo(&buf, "CREATE STATISTICS %s",
 						 quote_qualified_identifier(nsp,
@@ -1925,45 +1922,44 @@ pg_get_partkeydef_worker(Oid relid, int prettyFlags,
 	Assert(form->partrelid == relid);
 
 	/* Must get partclass and partcollation the hard way */
 	datum = SysCacheGetAttr(PARTRELID, tuple,
 							Anum_pg_partitioned_table_partclass, &isnull);
 	Assert(!isnull);
 	partclass = (oidvector *) DatumGetPointer(datum);
 
 	datum = SysCacheGetAttr(PARTRELID, tuple,
 							Anum_pg_partitioned_table_partcollation, &isnull);
 	Assert(!isnull);
 	partcollation = (oidvector *) DatumGetPointer(datum);
 
 	/*
 	 * Get the expressions, if any. (NOTE: we do not use the relcache
 	 * versions of the expressions, because we want to display
 	 * non-const-folded expressions.)
 	 */
 	if (!heap_attisnull(tuple, Anum_pg_partitioned_table_partexprs, NULL))
 	{
 		Datum		exprsDatum;
-		bool		isnull;
 		char	   *exprsString;
 
 		exprsDatum = SysCacheGetAttr(PARTRELID, tuple,
 									 Anum_pg_partitioned_table_partexprs,
 									 &isnull);
 		Assert(!isnull);
 		exprsString = TextDatumGetCString(exprsDatum);
 		partexprs = (List *) stringToNode(exprsString);
 
 		if (!IsA(partexprs, List))
 			elog(ERROR, "unexpected node type found in partexprs: %d",
 				 (int) nodeTag(partexprs));
 
 		pfree(exprsString);
 	}
 	else
 		partexprs = NIL;
 
 	partexpr_item = list_head(partexprs);
 
 	context = deparse_context_for(get_relation_name(relid), relid);
 
 	initStringInfo(&buf);
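Each ruleutils.c hunk above removes a block-local bool isnull and lets the SysCacheGetAttr() calls share an isnull declared elsewhere in the function (the partition-key hunk already uses that variable a few lines earlier). A sketch of the underlying pattern (invented helper; the cache id, tuple and attribute number are parameters rather than a real catalog lookup):

    #include "postgres.h"
    #include "access/attnum.h"
    #include "access/htup.h"
    #include "utils/builtins.h"
    #include "utils/syscache.h"

    /*
     * Sketch only: one "isnull" can serve every SysCacheGetAttr() call in a
     * function, because each call overwrites it before it is inspected.
     */
    static char *
    get_text_attr(int cacheId, HeapTuple tuple, AttrNumber attnum)
    {
        bool        isnull;
        Datum       datum;

        datum = SysCacheGetAttr(cacheId, tuple, attnum, &isnull);
        if (isnull)
            return NULL;

        return TextDatumGetCString(datum);
    }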