diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml index daba66c..6aac456 100644 --- a/doc/src/sgml/ddl.sgml +++ b/doc/src/sgml/ddl.sgml @@ -3005,6 +3005,11 @@ VALUES ('Albany', NULL, NULL, 'NY'); foreign table partitions. + + Updating the partition key of a row might cause it to be moved into a + different partition where this row satisfies its partition constraint. + + Example @@ -3297,9 +3302,22 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02 - An UPDATE that causes a row to move from one partition to - another fails, because the new value of the row fails to satisfy the - implicit partition constraint of the original partition. + When an UPDATE causes a row to move from one + partition to another, there is a chance that another concurrent + UPDATE or DELETE misses this row. + Suppose, during the row movement, the row is still visible for the + concurrent session, and it is about to do an UPDATE + or DELETE operation on the same row. This DML + operation can silently miss this row if the row now gets deleted from + the partition by the first session as part of its + UPDATE row movement. In such case, the concurrent + UPDATE/DELETE, being unaware of + the row movement, interprets that the row has just been deleted so there + is nothing to be done for this row. Whereas, in the usual case where the + table is not partitioned, or where there is no row movement, the second + session would have identified the newly updated row and carried + UPDATE/DELETE on this new row + version. diff --git a/doc/src/sgml/ref/update.sgml b/doc/src/sgml/ref/update.sgml index 0e99aa9..bd57f3f 100644 --- a/doc/src/sgml/ref/update.sgml +++ b/doc/src/sgml/ref/update.sgml @@ -282,10 +282,17 @@ UPDATE count In the case of a partitioned table, updating a row might cause it to no - longer satisfy the partition constraint. 
Since there is no provision to - move the row to the partition appropriate to the new value of its - partitioning key, an error will occur in this case. This can also happen - when updating a partition directly. + longer satisfy the partition constraint of the containing partition. In that + case, if there is some other partition in the partition tree for which this + row satisfies its partition constraint, then the row is moved to that + partition. If there isn't such a partition, an error will occur. The error + will also occur when updating a partition directly. Behind the scenes, the + row movement is actually a DELETE and + INSERT operation. However, there is a possibility that a + concurrent UPDATE or DELETE on the same row may miss + this row. For details see the section + . + diff --git a/doc/src/sgml/trigger.sgml b/doc/src/sgml/trigger.sgml index b0e160a..a8b000a 100644 --- a/doc/src/sgml/trigger.sgml +++ b/doc/src/sgml/trigger.sgml @@ -154,6 +154,29 @@ + If an UPDATE on a partitioned table causes a row to + move to another partition, it will be performed as a + DELETE from the original partition followed by + INSERT into the new partition. In this case, all + row-level BEFORE UPDATE triggers and all + row-level BEFORE DELETE triggers are fired + on the original partition. Then all row-level BEFORE + INSERT triggers are fired on the destination partition. + The possibility of surprising outcomes should be considered when all these + triggers affect the row being moved. As far as AFTER ROW + triggers are concerned, AFTER DELETE and + AFTER INSERT triggers are applied; but + AFTER UPDATE triggers are not applied + because the UPDATE has been converted to a + DELETE and INSERT. As far as + statement-level triggers are concerned, none of the + DELETE or INSERT triggers are fired, + even if row movement occurs; only the UPDATE triggers + defined on the target table used in the UPDATE statement + will be fired. 
+ + + Trigger functions invoked by per-statement triggers should always return NULL. Trigger functions invoked by per-row triggers can return a table row (a value of diff --git a/src/backend/catalog/partition.c b/src/backend/catalog/partition.c index cff59ed..1408cd6 100644 --- a/src/backend/catalog/partition.c +++ b/src/backend/catalog/partition.c @@ -1442,7 +1442,8 @@ get_qual_from_partbound(Relation rel, Relation parent, /* * map_partition_varattnos - maps varattno of any Vars in expr from the - * parent attno to partition attno. + * attno's of 'from_rel' partition to the attno's of 'to_rel' partition. + * The rels can be both leaf partition or a partitioned table. * * We must allow for cases where physical attnos of a partition can be * different from the parent's. @@ -1455,8 +1456,8 @@ get_qual_from_partbound(Relation rel, Relation parent, * are working on Lists, so it's less messy to do the casts internally. */ List * -map_partition_varattnos(List *expr, int target_varno, - Relation partrel, Relation parent, +map_partition_varattnos(List *expr, int fromrel_varno, + Relation to_rel, Relation from_rel, bool *found_whole_row) { bool my_found_whole_row = false; @@ -1465,14 +1466,14 @@ map_partition_varattnos(List *expr, int target_varno, { AttrNumber *part_attnos; - part_attnos = convert_tuples_by_name_map(RelationGetDescr(partrel), - RelationGetDescr(parent), + part_attnos = convert_tuples_by_name_map(RelationGetDescr(to_rel), + RelationGetDescr(from_rel), gettext_noop("could not convert row type")); expr = (List *) map_variable_attnos((Node *) expr, - target_varno, 0, + fromrel_varno, 0, part_attnos, - RelationGetDescr(parent)->natts, - RelationGetForm(partrel)->reltype, + RelationGetDescr(from_rel)->natts, + RelationGetForm(to_rel)->reltype, &my_found_whole_row); } @@ -2873,6 +2874,79 @@ error_exit: } /* + * pull_child_partition_columns + * + * For each column of rel which is in the partition key or which appears + * in an expression which is in the partition 
key, translate the attribute + * number of that column according to the given parent, and add the resulting + * column number to the 'partcols' bitmapset, offset as we frequently do by + * FirstLowInvalidHeapAttributeNumber. + */ +void +pull_child_partition_columns(Relation rel, + Relation parent, + Bitmapset **partcols) +{ + PartitionKey key = RelationGetPartitionKey(rel); + int16 partnatts = get_partition_natts(key); + List *partexprs = get_partition_exprs(key); + ListCell *lc; + Bitmapset *child_keycols = NULL; + int i; + AttrNumber *map; + int child_keycol = -1; + + /* + * First, compute the complete set of partition columns for this rel. For + * compatibility with the API exposed by pull_varattnos, we offset the + * column numbers by FirstLowInvalidHeapAttributeNumber. + */ + for (i = 0; i < partnatts; i++) + { + AttrNumber partattno = get_partition_col_attnum(key, i); + + if (partattno != 0) + child_keycols = + bms_add_member(child_keycols, + partattno - FirstLowInvalidHeapAttributeNumber); + } + foreach(lc, partexprs) + { + Node *expr = (Node *) lfirst(lc); + + pull_varattnos(expr, 1, &child_keycols); + } + + /* + * Next, work out how to convert from the attribute numbers for the child + * to the attribute numbers for the parent. + */ + map = + convert_tuples_by_name_map(RelationGetDescr(parent), + RelationGetDescr(rel), + gettext_noop("could not convert row type")); + + /* + * For each child key column we have identified, translate to the + * corresponding parent key column. Entry 0 in the map array corresponds + * to attribute number 1, which corresponds to a bitmapset entry for 1 - + * FirstLowInvalidHeapAttributeNumber. + */ + while ((child_keycol = bms_next_member(child_keycols, child_keycol)) >= 0) + { + int kc = child_keycol + FirstLowInvalidHeapAttributeNumber; + + Assert(kc > 0 && kc <= RelationGetNumberOfAttributes(rel)); + *partcols = + bms_add_member(*partcols, + map[kc - 1] - FirstLowInvalidHeapAttributeNumber); + } + + /* Release memory. 
*/ + pfree(map); +} + +/* * qsort_partition_hbound_cmp * * We sort hash bounds by modulus, then by remainder. diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 8f1a8ed..547a18b 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -2478,11 +2478,14 @@ CopyFrom(CopyState cstate) num_partitions; ExecSetupPartitionTupleRouting(cstate->rel, + NULL, + 0, 1, estate, &partition_dispatch_info, &partitions, &partition_tupconv_maps, + NULL, &partition_tuple_slot, &num_parted, &num_partitions); cstate->partition_dispatch_info = partition_dispatch_info; @@ -2748,7 +2751,7 @@ CopyFrom(CopyState cstate) /* Check the constraints of the tuple */ if (cstate->rel->rd_att->constr || check_partition_constr) - ExecConstraints(resultRelInfo, slot, estate); + ExecConstraints(resultRelInfo, slot, estate, true); if (useHeapMultiInsert) { diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 92ae382..319aa6f 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -2854,8 +2854,13 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo, { HeapTuple trigtuple; - Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid)); - if (fdw_trigtuple == NULL) + /* + * Note: if the UPDATE is converted into a DELETE+INSERT as part of + * update-partition-key operation, then this function is also called + * separately for DELETE and INSERT to capture transition table rows. + * In such case, either old tuple or new tuple can be NULL. + */ + if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid)) trigtuple = GetTupleForTrigger(estate, NULL, relinfo, @@ -5414,7 +5419,12 @@ AfterTriggerPendingOnRel(Oid relid) * triggers actually need to be queued. It is also called after each row, * even if there are no triggers for that event, if there are any AFTER * STATEMENT triggers for the statement which use transition tables, so that - * the transition tuplestores can be built. 
+ * the transition tuplestores can be built. Furthermore, if the transition + * capture is happening for UPDATEd rows being moved to another partition due + * partition-key change, then this function is called once when the row is + * deleted (to capture OLD row), and once when the row is inserted to another + * partition (to capture NEW row). This is done separately because DELETE and + * INSERT happen on different tables. * * Transition tuplestores are built now, rather than when events are pulled * off of the queue because AFTER ROW triggers are allowed to select from the @@ -5463,12 +5473,27 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, bool update_new_table = transition_capture->tcs_update_new_table; bool insert_new_table = transition_capture->tcs_insert_new_table;; - if ((event == TRIGGER_EVENT_DELETE && delete_old_table) || - (event == TRIGGER_EVENT_UPDATE && update_old_table)) + /* + * For capturing transition tuples for UPDATE events fired during + * partition row movement, either oldtup or newtup can be NULL, + * depending on whether the event is for row being deleted from old + * partition or it's for row being inserted into the new partition. But + * in any case, oldtup should always be non-NULL for DELETE events, and + * newtup should be non-NULL for INSERT events, because for transition + * capture with partition row movement, INSERT and DELETE events don't + * fire; only UPDATE event is fired. 
+ */ + Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table && + oldtup == NULL)); + Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table && + newtup == NULL)); + + if (oldtup != NULL && + ((event == TRIGGER_EVENT_DELETE && delete_old_table) || + (event == TRIGGER_EVENT_UPDATE && update_old_table))) { Tuplestorestate *old_tuplestore; - Assert(oldtup != NULL); old_tuplestore = transition_capture->tcs_private->old_tuplestore; if (map != NULL) @@ -5481,12 +5506,12 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, else tuplestore_puttuple(old_tuplestore, oldtup); } - if ((event == TRIGGER_EVENT_INSERT && insert_new_table) || - (event == TRIGGER_EVENT_UPDATE && update_new_table)) + if (newtup != NULL && + ((event == TRIGGER_EVENT_INSERT && insert_new_table) || + (event == TRIGGER_EVENT_UPDATE && update_new_table))) { Tuplestorestate *new_tuplestore; - Assert(newtup != NULL); new_tuplestore = transition_capture->tcs_private->new_tuplestore; if (original_insert_tuple != NULL) @@ -5506,7 +5531,8 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, if (trigdesc == NULL || (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) || (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) || - (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row)) + (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) || + (event == TRIGGER_EVENT_UPDATE && ((oldtup == NULL) ^ (newtup == NULL)))) return; } diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 493ff82..520dfd3 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -104,9 +104,6 @@ static char *ExecBuildSlotPartitionKeyDescription(Relation rel, int maxfieldlen); static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree); -static void ExecPartitionCheck(ResultRelInfo *resultRelInfo, - TupleTableSlot *slot, EState *estate); - /* * Note that 
GetUpdatedColumns() also exists in commands/trigger.c. There does * not appear to be any good header to put it into, given the structures that @@ -1850,15 +1847,10 @@ ExecRelCheck(ResultRelInfo *resultRelInfo, /* * ExecPartitionCheck --- check that tuple meets the partition constraint. */ -static void +bool ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate) { - Relation rel = resultRelInfo->ri_RelationDesc; - TupleDesc tupdesc = RelationGetDescr(rel); - Bitmapset *modifiedCols; - Bitmapset *insertedCols; - Bitmapset *updatedCols; ExprContext *econtext; /* @@ -1886,52 +1878,66 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, * As in case of the catalogued constraints, we treat a NULL result as * success here, not a failure. */ - if (!ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext)) - { - char *val_desc; - Relation orig_rel = rel; + return ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext); +} - /* See the comment above. */ - if (resultRelInfo->ri_PartitionRoot) +/* + * ExecPartitionCheckEmitError - Form and emit an error message after a failed + * partition constraint check. + */ +void +ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, + EState *estate) +{ + Relation rel = resultRelInfo->ri_RelationDesc; + Relation orig_rel = rel; + TupleDesc tupdesc = RelationGetDescr(rel); + char *val_desc; + Bitmapset *modifiedCols; + Bitmapset *insertedCols; + Bitmapset *updatedCols; + + /* See the comments in ExecConstraints. 
*/ + if (resultRelInfo->ri_PartitionRoot) + { + HeapTuple tuple = ExecFetchSlotTuple(slot); + TupleDesc old_tupdesc = RelationGetDescr(rel); + TupleConversionMap *map; + + rel = resultRelInfo->ri_PartitionRoot; + tupdesc = RelationGetDescr(rel); + /* a reverse map */ + map = convert_tuples_by_name(old_tupdesc, tupdesc, + gettext_noop("could not convert row type")); + if (map != NULL) { - HeapTuple tuple = ExecFetchSlotTuple(slot); - TupleDesc old_tupdesc = RelationGetDescr(rel); - TupleConversionMap *map; - - rel = resultRelInfo->ri_PartitionRoot; - tupdesc = RelationGetDescr(rel); - /* a reverse map */ - map = convert_tuples_by_name(old_tupdesc, tupdesc, - gettext_noop("could not convert row type")); - if (map != NULL) - { - tuple = do_convert_tuple(tuple, map); - ExecSetSlotDescriptor(slot, tupdesc); - ExecStoreTuple(tuple, slot, InvalidBuffer, false); - } + tuple = do_convert_tuple(tuple, map); + ExecSetSlotDescriptor(slot, tupdesc); + ExecStoreTuple(tuple, slot, InvalidBuffer, false); } - - insertedCols = GetInsertedColumns(resultRelInfo, estate); - updatedCols = GetUpdatedColumns(resultRelInfo, estate); - modifiedCols = bms_union(insertedCols, updatedCols); - val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel), - slot, - tupdesc, - modifiedCols, - 64); - ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("new row for relation \"%s\" violates partition constraint", - RelationGetRelationName(orig_rel)), - val_desc ? errdetail("Failing row contains %s.", val_desc) : 0)); } + + insertedCols = GetInsertedColumns(resultRelInfo, estate); + updatedCols = GetUpdatedColumns(resultRelInfo, estate); + modifiedCols = bms_union(insertedCols, updatedCols); + val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel), + slot, + tupdesc, + modifiedCols, + 64); + ereport(ERROR, + (errcode(ERRCODE_CHECK_VIOLATION), + errmsg("new row for relation \"%s\" violates partition constraint", + RelationGetRelationName(orig_rel)), + val_desc ? 
errdetail("Failing row contains %s.", val_desc) : 0)); } /* * ExecConstraints - check constraints of the tuple in 'slot' * - * This checks the traditional NOT NULL and check constraints, as well as - * the partition constraint, if any. + * This checks the traditional NOT NULL and check constraints, and if requested, + * checks the partition constraint. * * Note: 'slot' contains the tuple to check the constraints of, which may * have been converted from the original input tuple after tuple routing. @@ -1939,7 +1945,8 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, */ void ExecConstraints(ResultRelInfo *resultRelInfo, - TupleTableSlot *slot, EState *estate) + TupleTableSlot *slot, EState *estate, + bool check_partition_constraint) { Relation rel = resultRelInfo->ri_RelationDesc; TupleDesc tupdesc = RelationGetDescr(rel); @@ -2055,8 +2062,9 @@ ExecConstraints(ResultRelInfo *resultRelInfo, } } - if (resultRelInfo->ri_PartitionCheck) - ExecPartitionCheck(resultRelInfo, slot, estate); + if (check_partition_constraint && resultRelInfo->ri_PartitionCheck && + !ExecPartitionCheck(resultRelInfo, slot, estate)) + ExecPartitionCheckEmitError(resultRelInfo, slot, estate); } @@ -3242,6 +3250,13 @@ EvalPlanQualEnd(EPQState *epqstate) * ExecSetupPartitionTupleRouting - set up information needed during * tuple routing for partitioned tables * + * 'update_rri' has the UPDATE per-subplan result rels. These are re-used + * instead of allocating new ones while generating the array of all leaf + * partition result rels. + * + * 'num_update_rri' : number of UPDATE per-subplan result rels. For INSERT, + * this is 0. 
+ * * Output arguments: * 'pd' receives an array of PartitionDispatch objects with one entry for * every partitioned table in the partition tree @@ -3265,11 +3280,14 @@ EvalPlanQualEnd(EPQState *epqstate) */ void ExecSetupPartitionTupleRouting(Relation rel, + ResultRelInfo *update_rri, + int num_update_rri, Index resultRTindex, EState *estate, PartitionDispatch **pd, ResultRelInfo ***partitions, TupleConversionMap ***tup_conv_maps, + int **subplan_leaf_map, TupleTableSlot **partition_tuple_slot, int *num_parted, int *num_partitions) { @@ -3277,7 +3295,8 @@ ExecSetupPartitionTupleRouting(Relation rel, List *leaf_parts; ListCell *cell; int i; - ResultRelInfo *leaf_part_rri; + ResultRelInfo *leaf_part_arr; + int update_rri_index = 0; /* * Get the information about the partition tree after locking all the @@ -3286,11 +3305,45 @@ ExecSetupPartitionTupleRouting(Relation rel, (void) find_all_inheritors(RelationGetRelid(rel), RowExclusiveLock, NULL); *pd = RelationGetPartitionDispatchInfo(rel, num_parted, &leaf_parts); *num_partitions = list_length(leaf_parts); + if (subplan_leaf_map) + *subplan_leaf_map = NULL; *partitions = (ResultRelInfo **) palloc(*num_partitions * sizeof(ResultRelInfo *)); *tup_conv_maps = (TupleConversionMap **) palloc0(*num_partitions * sizeof(TupleConversionMap *)); + if (num_update_rri != 0) + { + /* + * For Updates, if the leaf partition is already present in the + * per-subplan result rels, we re-use that rather than initialize a new + * result rel. The per-subplan resultrels and the resultrels of the + * leaf partitions are both in the same canonical order. So while going + * through the leaf partition oids, we need to keep track of the next + * per-subplan result rel to be looked for in the leaf partition + * resultrels. So, set update_rri_index to the first per-subplan result + * rel, and then shift it as we find them one by one while scanning the + * leaf partition oids. 
+ */ + update_rri_index = 0; + + /* + * Prepare for generating the mapping from subplan result rels to leaf + * partition position. + */ + *subplan_leaf_map = palloc(num_update_rri * sizeof(int)); + } + else + { + /* + * For inserts, we need to create all new result rels, so avoid + * repeated pallocs by allocating memory for all the result rels in + * bulk. + */ + leaf_part_arr = (ResultRelInfo *) palloc0(*num_partitions * + sizeof(ResultRelInfo)); + } + /* * Initialize an empty slot that will be used to manipulate tuples of any * given partition's rowtype. It is attached to the caller-specified node @@ -3299,20 +3352,66 @@ ExecSetupPartitionTupleRouting(Relation rel, */ *partition_tuple_slot = MakeTupleTableSlot(); - leaf_part_rri = (ResultRelInfo *) palloc0(*num_partitions * - sizeof(ResultRelInfo)); i = 0; foreach(cell, leaf_parts) { - Relation partrel; + ResultRelInfo *leaf_part_rri; + Relation partrel = NULL; TupleDesc part_tupdesc; + Oid leaf_oid = lfirst_oid(cell); + + if (num_update_rri != 0) + { + /* Is this leaf partition present in the update resultrel ? */ + if (update_rri_index < num_update_rri && + RelationGetRelid(update_rri[update_rri_index].ri_RelationDesc) == leaf_oid) + { + leaf_part_rri = &update_rri[update_rri_index]; + partrel = leaf_part_rri->ri_RelationDesc; + + /* + * This is required when converting tuple as per root + * partition tuple descriptor. When generating the update + * plans, this was not set. + */ + leaf_part_rri->ri_PartitionRoot = rel; + + /* + * Save the position of this update rel in the leaf partitions + * array + */ + (*subplan_leaf_map)[update_rri_index] = i; + + update_rri_index++; + } + else + leaf_part_rri = (ResultRelInfo *) palloc0(sizeof(ResultRelInfo)); + } + else + { + /* For INSERTs, we already have an array of result rels allocated */ + leaf_part_rri = leaf_part_arr + i; + } /* - * We locked all the partitions above including the leaf partitions. 
- * Note that each of the relations in *partitions are eventually - * closed by the caller. + * If we didn't open the partition rel, it means we haven't + * initialized the result rel as well. */ - partrel = heap_open(lfirst_oid(cell), NoLock); + if (!partrel) + { + /* + * We locked all the partitions above including the leaf + * partitions. Note that each of the newly opened relations in + * *partitions are eventually closed by the caller. + */ + partrel = heap_open(leaf_oid, NoLock); + InitResultRelInfo(leaf_part_rri, + partrel, + resultRTindex, + rel, + estate->es_instrument); + } + part_tupdesc = RelationGetDescr(partrel); /* @@ -3322,14 +3421,10 @@ ExecSetupPartitionTupleRouting(Relation rel, (*tup_conv_maps)[i] = convert_tuples_by_name(tupDesc, part_tupdesc, gettext_noop("could not convert row type")); - InitResultRelInfo(leaf_part_rri, - partrel, - resultRTindex, - rel, - estate->es_instrument); - /* - * Verify result relation is a valid target for INSERT. + * Verify result relation is a valid target for insert operation. Even + * for updates, we are doing this for tuple-routing, so again, we need + * to check the validity for insert operation. */ CheckValidResultRel(leaf_part_rri, CMD_INSERT); @@ -3345,9 +3440,15 @@ ExecSetupPartitionTupleRouting(Relation rel, estate->es_leaf_result_relations = lappend(estate->es_leaf_result_relations, leaf_part_rri); - (*partitions)[i] = leaf_part_rri++; + (*partitions)[i] = leaf_part_rri; i++; } + + /* + * For UPDATE, we should have found all the per-subplan resultrels in the + * leaf partitions. + */ + Assert(num_update_rri == 0 || update_rri_index == num_update_rri); } /* @@ -3373,8 +3474,9 @@ ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd, * First check the root table's partition constraint, if any. No point in * routing the tuple if it doesn't belong in the root table itself. 
*/ - if (resultRelInfo->ri_PartitionCheck) - ExecPartitionCheck(resultRelInfo, slot, estate); + if (resultRelInfo->ri_PartitionCheck && + !ExecPartitionCheck(resultRelInfo, slot, estate)) + ExecPartitionCheckEmitError(resultRelInfo, slot, estate); result = get_partition_for_tuple(pd, slot, estate, &failed_at, &failed_slot); diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c index fb538c0..e11f7cb 100644 --- a/src/backend/executor/execReplication.c +++ b/src/backend/executor/execReplication.c @@ -401,7 +401,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot) /* Check the constraints of the tuple */ if (rel->rd_att->constr) - ExecConstraints(resultRelInfo, slot, estate); + ExecConstraints(resultRelInfo, slot, estate, true); /* Store the slot into tuple that we can inspect. */ tuple = ExecMaterializeSlot(slot); @@ -466,7 +466,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate, /* Check the constraints of the tuple */ if (rel->rd_att->constr) - ExecConstraints(resultRelInfo, slot, estate); + ExecConstraints(resultRelInfo, slot, estate, true); /* Store the slot into tuple that we can write. 
*/ tuple = ExecMaterializeSlot(slot); diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 0027d21..750b0f7 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -45,6 +45,7 @@ #include "foreign/fdwapi.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" +#include "optimizer/var.h" #include "parser/parsetree.h" #include "storage/bufmgr.h" #include "storage/lmgr.h" @@ -62,7 +63,16 @@ static bool ExecOnConflictUpdate(ModifyTableState *mtstate, EState *estate, bool canSetTag, TupleTableSlot **returning); - +static void ExecSetupChildParentMap(ModifyTableState *mtstate, + ResultRelInfo *rootRelInfo, + int numResultRelInfos, bool perleaf); +static TupleConversionMap *tupconv_map_for_subplan(ModifyTableState *node, + int whichplan); +static HeapTuple ConvertPartitionTupleSlot(ModifyTableState *mtstate, + TupleConversionMap *map, + HeapTuple tuple, + TupleTableSlot *new_slot, + TupleTableSlot **p_old_slot); /* * Verify that the tuples to be produced by INSERT or UPDATE match the * target relation's rowtype @@ -240,6 +250,38 @@ ExecCheckTIDVisible(EState *estate, ReleaseBuffer(buffer); } +/* + * ConvertPartitionTupleSlot -- convenience function for converting tuple and + * storing it into a tuple slot provided through 'new_slot', which typically + * should be one of the dedicated partition tuple slot. Passes the partition + * tuple slot back into output param p_old_slot. If no mapping present, keeps + * p_old_slot unchanged. + * + * Returns the converted tuple. + */ +static HeapTuple +ConvertPartitionTupleSlot(ModifyTableState *mtstate, + TupleConversionMap *map, + HeapTuple tuple, + TupleTableSlot *new_slot, + TupleTableSlot **p_old_slot) +{ + if (!map) + return tuple; + + tuple = do_convert_tuple(tuple, map); + + /* + * Change the partition tuple slot descriptor, as per converted tuple. 
+ */ + *p_old_slot = new_slot; + Assert(new_slot != NULL); + ExecSetSlotDescriptor(new_slot, map->outdesc); + ExecStoreTuple(tuple, new_slot, InvalidBuffer, true); + + return tuple; +} + /* ---------------------------------------------------------------- * ExecInsert * @@ -265,6 +307,7 @@ ExecInsert(ModifyTableState *mtstate, Oid newId; List *recheckIndexes = NIL; TupleTableSlot *result = NULL; + TransitionCaptureState *transition_capture = mtstate->mt_transition_capture; /* * get the heap tuple out of the tuple table slot, making sure we have a @@ -281,17 +324,50 @@ ExecInsert(ModifyTableState *mtstate, if (mtstate->mt_partition_dispatch_info) { int leaf_part_index; - TupleConversionMap *map; + ResultRelInfo *rootResultRelInfo; + + /* + * If the original operation is UPDATE, the root partitioned table + * needs to be fetched from mtstate->rootResultRelInfo. + */ + rootResultRelInfo = (mtstate->rootResultRelInfo ? + mtstate->rootResultRelInfo : resultRelInfo); + + /* + * If the resultRelInfo is not the root partitioned table (which + * happens for UPDATE), we should convert the tuple into root's tuple + * descriptor, since ExecFindPartition() starts the search from root. + * The tuple conversion map list is in the order of + * mtstate->resultRelInfo[], so to retrieve the one for this resultRel, + * we need to know the position of the resultRel in + * mtstate->resultRelInfo[]. + */ + if (rootResultRelInfo != resultRelInfo) + { + int map_index = resultRelInfo - mtstate->resultRelInfo; + TupleConversionMap *tupconv_map; + + /* resultRelInfo must be one of the per-subplan result rels. */ + Assert(resultRelInfo >= mtstate->resultRelInfo && + resultRelInfo <= mtstate->resultRelInfo + mtstate->mt_nplans - 1); + + tupconv_map = tupconv_map_for_subplan(mtstate, map_index); + tuple = ConvertPartitionTupleSlot(mtstate, + tupconv_map, + tuple, + mtstate->mt_root_tuple_slot, + &slot); + } /* * Away we go ... 
If we end up not finding a partition after all, * ExecFindPartition() does not return and errors out instead. * Otherwise, the returned value is to be used as an index into arrays - * mt_partitions[] and mt_partition_tupconv_maps[] that will get us + * mt_partitions[] and mt_parentchild_tupconv_maps[] that will get us * the ResultRelInfo and TupleConversionMap for the partition, * respectively. */ - leaf_part_index = ExecFindPartition(resultRelInfo, + leaf_part_index = ExecFindPartition(rootResultRelInfo, mtstate->mt_partition_dispatch_info, slot, estate); @@ -330,8 +406,10 @@ ExecInsert(ModifyTableState *mtstate, * back to tuplestore format. */ mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL; + + Assert(mtstate->mt_is_tupconv_perpart == true); mtstate->mt_transition_capture->tcs_map = - mtstate->mt_transition_tupconv_maps[leaf_part_index]; + mtstate->mt_childparent_tupconv_maps[leaf_part_index]; } else { @@ -344,30 +422,21 @@ ExecInsert(ModifyTableState *mtstate, } } if (mtstate->mt_oc_transition_capture != NULL) + { + Assert(mtstate->mt_is_tupconv_perpart == true); mtstate->mt_oc_transition_capture->tcs_map = - mtstate->mt_transition_tupconv_maps[leaf_part_index]; + mtstate->mt_childparent_tupconv_maps[leaf_part_index]; + } /* * We might need to convert from the parent rowtype to the partition * rowtype. */ - map = mtstate->mt_partition_tupconv_maps[leaf_part_index]; - if (map) - { - Relation partrel = resultRelInfo->ri_RelationDesc; - - tuple = do_convert_tuple(tuple, map); - - /* - * We must use the partition's tuple descriptor from this point - * on, until we're finished dealing with the partition. Use the - * dedicated slot for that. 
- */ - slot = mtstate->mt_partition_tuple_slot; - Assert(slot != NULL); - ExecSetSlotDescriptor(slot, RelationGetDescr(partrel)); - ExecStoreTuple(tuple, slot, InvalidBuffer, true); - } + tuple = ConvertPartitionTupleSlot(mtstate, + mtstate->mt_parentchild_tupconv_maps[leaf_part_index], + tuple, + mtstate->mt_partition_tuple_slot, + &slot); } resultRelationDesc = resultRelInfo->ri_RelationDesc; @@ -485,7 +554,7 @@ ExecInsert(ModifyTableState *mtstate, /* Check the constraints of the tuple */ if (resultRelationDesc->rd_att->constr || check_partition_constr) - ExecConstraints(resultRelInfo, slot, estate); + ExecConstraints(resultRelInfo, slot, estate, true); if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0) { @@ -621,9 +690,31 @@ ExecInsert(ModifyTableState *mtstate, setLastTid(&(tuple->t_self)); } + /* + * In case this is part of update tuple routing, put this row into the + * transition NEW TABLE if we are capturing transition tables. We need to + * do this separately for DELETE and INSERT because they happen on + * different tables. + */ + if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture + && mtstate->mt_transition_capture->tcs_update_new_table) + { + ExecARUpdateTriggers(estate, resultRelInfo, NULL, + NULL, + tuple, + NULL, + mtstate->mt_transition_capture); + + /* + * Now that we have already captured NEW TABLE row, any AR INSERT + * trigger should not again capture it below. Arrange for the same. 
+ */ + transition_capture = NULL; + } + /* AFTER ROW INSERT Triggers */ ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes, - mtstate->mt_transition_capture); + transition_capture); list_free(recheckIndexes); @@ -677,6 +768,8 @@ ExecDelete(ModifyTableState *mtstate, TupleTableSlot *planSlot, EPQState *epqstate, EState *estate, + bool *delete_skipped, + bool process_returning, bool canSetTag) { ResultRelInfo *resultRelInfo; @@ -684,6 +777,10 @@ ExecDelete(ModifyTableState *mtstate, HTSU_Result result; HeapUpdateFailureData hufd; TupleTableSlot *slot = NULL; + TransitionCaptureState *transition_capture = mtstate->mt_transition_capture; + + if (delete_skipped) + *delete_skipped = true; /* * get information on the (current) result relation @@ -848,12 +945,39 @@ ldelete:; if (canSetTag) (estate->es_processed)++; + /* The delete has actually happened, so inform that to the caller */ + if (delete_skipped) + *delete_skipped = false; + + /* + * In case this is part of update tuple routing, put this row into the + * transition OLD TABLE if we are capturing transition tables. We need to + * do this separately for DELETE and INSERT because they happen on + * different tables. + */ + if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture + && mtstate->mt_transition_capture->tcs_update_old_table) + { + ExecARUpdateTriggers(estate, resultRelInfo, + tupleid, + oldtuple, + NULL, + NULL, + transition_capture); + + /* + * Now that we have already captured OLD TABLE row, any AR DELETE + * trigger should not again capture it below. Arrange for the same. 
+ */ + transition_capture = NULL; + } + /* AFTER ROW DELETE Triggers */ ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple, - mtstate->mt_transition_capture); + transition_capture); - /* Process RETURNING if present */ - if (resultRelInfo->ri_projectReturning) + /* Process RETURNING if present and if requested */ + if (process_returning && resultRelInfo->ri_projectReturning) { /* * We have to put the target tuple into a slot, which means first we @@ -946,6 +1070,7 @@ ExecUpdate(ModifyTableState *mtstate, HTSU_Result result; HeapUpdateFailureData hufd; List *recheckIndexes = NIL; + TupleConversionMap *saved_tcs_map = NULL; /* * abort the operation if not running transactions @@ -1042,12 +1167,82 @@ lreplace:; resultRelInfo, slot, estate); /* + * If a partition check fails, try to move the row into the right + * partition. + */ + if (resultRelInfo->ri_PartitionCheck && + !ExecPartitionCheck(resultRelInfo, slot, estate)) + { + bool delete_skipped; + TupleTableSlot *ret_slot; + + /* + * When an UPDATE is run with a leaf partition, we would not have + * partition tuple routing setup. In that case, fail with + * partition constraint violation error. + */ + if (mtstate->mt_partition_dispatch_info == NULL) + ExecPartitionCheckEmitError(resultRelInfo, slot, estate); + + /* Do the row movement. */ + + /* + * Skip RETURNING processing for DELETE. We want to return rows + * from INSERT. + */ + ExecDelete(mtstate, tupleid, oldtuple, planSlot, epqstate, estate, + &delete_skipped, false, false); + + /* + * For some reason if DELETE didn't happen (for e.g. trigger + * prevented it, or it was already deleted by self, or it was + * concurrently deleted by another transaction), then we should + * skip INSERT as well, otherwise, there will be effectively one + * new row inserted. 
+ * + * For a normal UPDATE, the case where the tuple has been the + * subject of a concurrent UPDATE or DELETE would be handled by + * the EvalPlanQual machinery, but for an UPDATE that we've + * translated into a DELETE from this partition and an INSERT into + * some other partition, that's not available, because CTID chains + * can't span relation boundaries. We mimic the semantics to a + * limited extent by skipping the INSERT if the DELETE fails to + * find a tuple. This ensures that two concurrent attempts to + * UPDATE the same tuple at the same time can't turn one tuple + * into two, and that an UPDATE of a just-deleted tuple can't + * resurrect it. + */ + if (delete_skipped) + return NULL; + + if (mtstate->mt_transition_capture) + saved_tcs_map = mtstate->mt_transition_capture->tcs_map; + + ret_slot = ExecInsert(mtstate, slot, planSlot, NULL, + ONCONFLICT_NONE, estate, canSetTag); + + if (mtstate->mt_transition_capture) + { + /* + * Revert back to the transition capture map created for + * UPDATE; otherwise the next UPDATE will incorrectly use the + * one created for INSERT. + */ + mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL; + mtstate->mt_transition_capture->tcs_map = saved_tcs_map; + } + return ret_slot; + } + + /* * Check the constraints of the tuple. Note that we pass the same * slot for the orig_slot argument, because unlike ExecInsert(), no * tuple-routing is performed here, hence the slot remains unchanged. + * We have already checked partition constraints above, so skip + * checking them here. 
*/ - if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck) - ExecConstraints(resultRelInfo, slot, estate); + if (resultRelationDesc->rd_att->constr) + ExecConstraints(resultRelInfo, slot, estate, false); /* * replace the heap tuple @@ -1475,7 +1670,6 @@ static void ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate) { ResultRelInfo *targetRelInfo = getASTriggerResultRelInfo(mtstate); - int i; /* Check for transition tables on the directly targeted relation. */ mtstate->mt_transition_capture = @@ -1504,55 +1698,113 @@ ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate) mtstate->mt_num_partitions : mtstate->mt_nplans); + ExecSetupChildParentMap(mtstate, targetRelInfo, numResultRelInfos, + (mtstate->mt_partition_dispatch_info != NULL)); + /* - * Build array of conversion maps from each child's TupleDesc to the - * one used in the tuplestore. The map pointers may be NULL when no - * conversion is necessary, which is hopefully a common case for - * partitions. + * Install the conversion map for the first plan for UPDATE and DELETE + * operations. It will be advanced each time we switch to the next + * plan. (INSERT operations set it every time, so we need not update + * mtstate->mt_oc_transition_capture here.) */ - mtstate->mt_transition_tupconv_maps = (TupleConversionMap **) - palloc0(sizeof(TupleConversionMap *) * numResultRelInfos); + if (mtstate->mt_transition_capture && mtstate->operation != CMD_INSERT) + mtstate->mt_transition_capture->tcs_map = + tupconv_map_for_subplan(mtstate, 0); + } +} - /* Choose the right set of partitions */ - if (mtstate->mt_partition_dispatch_info != NULL) - { - /* - * For tuple routing among partitions, we need TupleDescs based - * on the partition routing table. - */ - ResultRelInfo **resultRelInfos = mtstate->mt_partitions; +/* + * Initialize the child-to-root tuple conversion map array. 
+ */ +static void +ExecSetupChildParentMap(ModifyTableState *mtstate, + ResultRelInfo *rootRelInfo, + int numResultRelInfos, bool perleaf) +{ + TupleDesc outdesc; + int i; - for (i = 0; i < numResultRelInfos; ++i) - { - mtstate->mt_transition_tupconv_maps[i] = - convert_tuples_by_name(RelationGetDescr(resultRelInfos[i]->ri_RelationDesc), - RelationGetDescr(targetRelInfo->ri_RelationDesc), - gettext_noop("could not convert row type")); - } - } - else - { - /* Otherwise we need the ResultRelInfo for each subplan. */ - ResultRelInfo *resultRelInfos = mtstate->resultRelInfo; + /* First check if there is already one */ + if (mtstate->mt_childparent_tupconv_maps) + return; - for (i = 0; i < numResultRelInfos; ++i) - { - mtstate->mt_transition_tupconv_maps[i] = - convert_tuples_by_name(RelationGetDescr(resultRelInfos[i].ri_RelationDesc), - RelationGetDescr(targetRelInfo->ri_RelationDesc), - gettext_noop("could not convert row type")); - } + /* Get tuple descriptor of the root partitioned table. */ + outdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc); + + /* + * Build array of conversion maps from each child's TupleDesc to the + * one used in the tuplestore. The map pointers may be NULL when no + * conversion is necessary, which is hopefully a common case for + * partitions. + */ + mtstate->mt_childparent_tupconv_maps = (TupleConversionMap **) + palloc0(sizeof(TupleConversionMap *) * numResultRelInfos); + + /* Choose the right set of partitions */ + if (perleaf) + { + /* + * For tuple routing among partitions, we need TupleDescs based + * on the partition routing table. + */ + ResultRelInfo **resultRelInfos = mtstate->mt_partitions; + + for (i = 0; i < numResultRelInfos; ++i) + { + mtstate->mt_childparent_tupconv_maps[i] = + convert_tuples_by_name(RelationGetDescr(resultRelInfos[i]->ri_RelationDesc), + outdesc, + gettext_noop("could not convert row type")); } /* - * Install the conversion map for the first plan for UPDATE and DELETE - * operations. 
It will be advanced each time we switch to the next - * plan. (INSERT operations set it every time, so we need not update - * mtstate->mt_oc_transition_capture here.) + * Save the info that the tuple conversion map is per-leaf, not + * per-subplan */ - if (mtstate->mt_transition_capture) - mtstate->mt_transition_capture->tcs_map = - mtstate->mt_transition_tupconv_maps[0]; + mtstate->mt_is_tupconv_perpart = true; + } + else + { + /* Otherwise we need the ResultRelInfo for each subplan. */ + ResultRelInfo *resultRelInfos = mtstate->resultRelInfo; + + for (i = 0; i < numResultRelInfos; ++i) + { + mtstate->mt_childparent_tupconv_maps[i] = + convert_tuples_by_name(RelationGetDescr(resultRelInfos[i].ri_RelationDesc), + outdesc, + gettext_noop("could not convert row type")); + } + } + +} + +/* + * For a given subplan index, get the tuple conversion map. + */ +static TupleConversionMap * +tupconv_map_for_subplan(ModifyTableState *mtstate, int whichplan) +{ + Assert(mtstate->mt_childparent_tupconv_maps != NULL); + + /* + * If the tuple conversion map array is per-partition, we need to first get + * the index into the partition array. + */ + if (mtstate->mt_is_tupconv_perpart) + { + int leaf_index; + + Assert(mtstate->mt_subplan_partition_offsets != NULL); + leaf_index = mtstate->mt_subplan_partition_offsets[whichplan]; + + Assert(leaf_index >= 0 && leaf_index < mtstate->mt_num_partitions); + return mtstate->mt_childparent_tupconv_maps[leaf_index]; + } + else + { + Assert(whichplan >= 0 && whichplan < mtstate->mt_nplans); + return mtstate->mt_childparent_tupconv_maps[whichplan]; } } @@ -1659,15 +1911,13 @@ ExecModifyTable(PlanState *pstate) /* Prepare to convert transition tuples from this child. 
*/ if (node->mt_transition_capture != NULL) { - Assert(node->mt_transition_tupconv_maps != NULL); node->mt_transition_capture->tcs_map = - node->mt_transition_tupconv_maps[node->mt_whichplan]; + tupconv_map_for_subplan(node, node->mt_whichplan); } if (node->mt_oc_transition_capture != NULL) { - Assert(node->mt_transition_tupconv_maps != NULL); node->mt_oc_transition_capture->tcs_map = - node->mt_transition_tupconv_maps[node->mt_whichplan]; + tupconv_map_for_subplan(node, node->mt_whichplan); } continue; } @@ -1783,7 +2033,8 @@ ExecModifyTable(PlanState *pstate) break; case CMD_DELETE: slot = ExecDelete(node, tupleid, oldtuple, planSlot, - &node->mt_epqstate, estate, node->canSetTag); + &node->mt_epqstate, estate, + NULL, true, node->canSetTag); break; default: elog(ERROR, "unknown operation"); @@ -1828,9 +2079,12 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ResultRelInfo *resultRelInfo; TupleDesc tupDesc; Plan *subplan; + int firstVarno = 0; + Relation firstResultRel = NULL; ListCell *l; int i; Relation rel; + bool update_tuple_routing_needed = node->part_cols_updated; /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); @@ -1903,6 +2157,15 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) resultRelInfo->ri_IndexRelationDescs == NULL) ExecOpenIndices(resultRelInfo, mtstate->mt_onconflict != ONCONFLICT_NONE); + /* + * If this is an UPDATE and a BEFORE UPDATE trigger is present, we may + * need to do update tuple routing. 
+ */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->trig_update_before_row && + operation == CMD_UPDATE) + update_tuple_routing_needed = true; + /* Now init the plan for this result rel */ estate->es_result_relation_info = resultRelInfo; mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags); @@ -1940,31 +2203,51 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) else rel = mtstate->resultRelInfo->ri_RelationDesc; - /* Build state for INSERT tuple routing */ - if (operation == CMD_INSERT && - rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + /* Decide whether we need to perform update tuple routing. */ + if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) + update_tuple_routing_needed = false; + + /* + * Build state for tuple routing if it's an INSERT or if it's an UPDATE of + * partition key. + */ + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE && + (operation == CMD_INSERT || update_tuple_routing_needed)) { PartitionDispatch *partition_dispatch_info; ResultRelInfo **partitions; TupleConversionMap **partition_tupconv_maps; + int *subplan_leaf_map; TupleTableSlot *partition_tuple_slot; int num_parted, num_partitions; ExecSetupPartitionTupleRouting(rel, + mtstate->resultRelInfo, + (operation == CMD_UPDATE ? 
nplans : 0), node->nominalRelation, estate, &partition_dispatch_info, &partitions, &partition_tupconv_maps, + &subplan_leaf_map, &partition_tuple_slot, &num_parted, &num_partitions); mtstate->mt_partition_dispatch_info = partition_dispatch_info; mtstate->mt_num_dispatch = num_parted; mtstate->mt_partitions = partitions; mtstate->mt_num_partitions = num_partitions; - mtstate->mt_partition_tupconv_maps = partition_tupconv_maps; + mtstate->mt_parentchild_tupconv_maps = partition_tupconv_maps; + mtstate->mt_subplan_partition_offsets = subplan_leaf_map; mtstate->mt_partition_tuple_slot = partition_tuple_slot; + mtstate->mt_root_tuple_slot = MakeTupleTableSlot(); + + /* + * Below are required as reference objects for mapping partition + * attno's in expressions such as WCO and RETURNING. + */ + firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex; + firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc; } /* @@ -1975,6 +2258,18 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ExecSetupTransitionCaptureState(mtstate, estate); /* + * Construct mapping from each of the partition attnos to the root attno. + * This is required when during update row movement the tuple descriptor of + * a source partition does not match the root partitioned table descriptor. + * In such a case we need to convert tuples to the root tuple descriptor, + * because the search for destination partition starts from the root. Skip + * this setup if it's not a partition key update. + */ + if (update_tuple_routing_needed) + ExecSetupChildParentMap(mtstate, getASTriggerResultRelInfo(mtstate), + mtstate->mt_nplans, false); + + /* * Initialize any WITH CHECK OPTION constraints if needed. */ resultRelInfo = mtstate->resultRelInfo; @@ -2004,26 +2299,29 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * Build WITH CHECK OPTION constraints for each leaf partition rel. 
Note * that we didn't build the withCheckOptionList for each partition within * the planner, but simple translation of the varattnos for each partition - * will suffice. This only occurs for the INSERT case; UPDATE/DELETE - * cases are handled above. + * will suffice. This only occurs for the INSERT case or for UPDATE row + * movement. DELETEs and local UPDATEs are handled above. */ if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0) { - List *wcoList; - PlanState *plan; + List *first_wcoList; /* * In case of INSERT on partitioned tables, there is only one plan. * Likewise, there is only one WITH CHECK OPTIONS list, not one per - * partition. We make a copy of the WCO qual for each partition; note - * that, if there are SubPlans in there, they all end up attached to - * the one parent Plan node. + * partition. Whereas for UPDATE, there are as many WCOs as there are + * plans. So in either case, use the WCO expression of the first + * resultRelInfo as a reference to calculate attno's for the WCO + * expression of each of the partitions. We make a copy of the WCO + * qual for each partition. Note that, if there are SubPlans in there, + * they all end up attached to the one parent Plan node. 
*/ - Assert(operation == CMD_INSERT && - list_length(node->withCheckOptionLists) == 1 && - mtstate->mt_nplans == 1); - wcoList = linitial(node->withCheckOptionLists); - plan = mtstate->mt_plans[0]; + Assert(update_tuple_routing_needed || + (operation == CMD_INSERT && + list_length(node->withCheckOptionLists) == 1 && + mtstate->mt_nplans == 1)); + + first_wcoList = linitial(node->withCheckOptionLists); for (i = 0; i < mtstate->mt_num_partitions; i++) { Relation partrel; @@ -2032,17 +2330,26 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ListCell *ll; resultRelInfo = mtstate->mt_partitions[i]; + + /* + * If we are referring to a resultRelInfo from one of the update + * result rels, that result rel would already have WithCheckOptions + * initialized. + */ + if (resultRelInfo->ri_WithCheckOptions) + continue; + partrel = resultRelInfo->ri_RelationDesc; - /* varno = node->nominalRelation */ - mapped_wcoList = map_partition_varattnos(wcoList, - node->nominalRelation, - partrel, rel, NULL); + mapped_wcoList = map_partition_varattnos(first_wcoList, + firstVarno, + partrel, firstResultRel, + NULL); foreach(ll, mapped_wcoList) { WithCheckOption *wco = castNode(WithCheckOption, lfirst(ll)); ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual), - plan); + &mtstate->ps); wcoExprs = lappend(wcoExprs, wcoExpr); } @@ -2059,7 +2366,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) { TupleTableSlot *slot; ExprContext *econtext; - List *returningList; + List *firstReturningList; /* * Initialize result tuple slot and assign its rowtype using the first @@ -2096,22 +2403,35 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * Build a projection for each leaf partition rel. Note that we * didn't build the returningList for each partition within the * planner, but simple translation of the varattnos for each partition - * will suffice. This only occurs for the INSERT case; UPDATE/DELETE - * are handled above. 
+ * will suffice. This only occurs for the INSERT case or for UPDATE + * row movement. DELETEs and local UPDATEs are handled above. */ - returningList = linitial(node->returningLists); + firstReturningList = linitial(node->returningLists); for (i = 0; i < mtstate->mt_num_partitions; i++) { Relation partrel; List *rlist; resultRelInfo = mtstate->mt_partitions[i]; + + /* + * If we are referring to a resultRelInfo from one of the update + * result rels, that result rel would already have a returningList + * built. + */ + if (resultRelInfo->ri_projectReturning) + continue; + partrel = resultRelInfo->ri_RelationDesc; - /* varno = node->nominalRelation */ - rlist = map_partition_varattnos(returningList, - node->nominalRelation, - partrel, rel, NULL); + /* + * Use the returning expression of the first resultRelInfo as a + * reference to calculate attno's for the returning expression of + * each of the partitions. + */ + rlist = map_partition_varattnos(firstReturningList, + firstVarno, + partrel, firstResultRel, NULL); resultRelInfo->ri_projectReturning = ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps, resultRelInfo->ri_RelationDesc->rd_att); @@ -2356,6 +2676,7 @@ void ExecEndModifyTable(ModifyTableState *node) { int i; + CmdType operation = node->operation; /* * Allow any FDWs to shut down @@ -2390,11 +2711,23 @@ ExecEndModifyTable(ModifyTableState *node) { ResultRelInfo *resultRelInfo = node->mt_partitions[i]; + /* + * If this result rel is one of the subplan result rels, let + * ExecEndPlan() close it. For INSERTs, this does not apply because + * all leaf partition result rels are anyway newly allocated. 
+ */ + if (operation == CMD_UPDATE && + resultRelInfo >= node->resultRelInfo && + resultRelInfo < node->resultRelInfo + node->mt_nplans) + continue; + ExecCloseIndices(resultRelInfo); heap_close(resultRelInfo->ri_RelationDesc, NoLock); } - /* Release the standalone partition tuple descriptor, if any */ + /* Release the standalone partition tuple descriptors, if any */ + if (node->mt_root_tuple_slot) + ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot); if (node->mt_partition_tuple_slot) ExecDropSingleTupleTableSlot(node->mt_partition_tuple_slot); diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index cadd253..35edd66 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -204,6 +204,7 @@ _copyModifyTable(const ModifyTable *from) COPY_SCALAR_FIELD(canSetTag); COPY_SCALAR_FIELD(nominalRelation); COPY_NODE_FIELD(partitioned_rels); + COPY_SCALAR_FIELD(part_cols_updated); COPY_NODE_FIELD(resultRelations); COPY_SCALAR_FIELD(resultRelIndex); COPY_SCALAR_FIELD(rootResultRelIndex); diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index 291d1ee..48099ca 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -372,6 +372,7 @@ _outModifyTable(StringInfo str, const ModifyTable *node) WRITE_BOOL_FIELD(canSetTag); WRITE_UINT_FIELD(nominalRelation); WRITE_NODE_FIELD(partitioned_rels); + WRITE_BOOL_FIELD(part_cols_updated); WRITE_NODE_FIELD(resultRelations); WRITE_INT_FIELD(resultRelIndex); WRITE_INT_FIELD(rootResultRelIndex); @@ -2100,6 +2101,7 @@ _outModifyTablePath(StringInfo str, const ModifyTablePath *node) WRITE_BOOL_FIELD(canSetTag); WRITE_UINT_FIELD(nominalRelation); WRITE_NODE_FIELD(partitioned_rels); + WRITE_BOOL_FIELD(part_cols_updated); WRITE_NODE_FIELD(resultRelations); WRITE_NODE_FIELD(subpaths); WRITE_NODE_FIELD(subroots); diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index 42c595d..7293d8a 100644 --- a/src/backend/nodes/readfuncs.c 
+++ b/src/backend/nodes/readfuncs.c @@ -1568,6 +1568,7 @@ _readModifyTable(void) READ_BOOL_FIELD(canSetTag); READ_UINT_FIELD(nominalRelation); READ_NODE_FIELD(partitioned_rels); + READ_BOOL_FIELD(part_cols_updated); READ_NODE_FIELD(resultRelations); READ_INT_FIELD(resultRelIndex); READ_INT_FIELD(rootResultRelIndex); diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 9c74e39..524ba00 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -278,6 +278,7 @@ static ProjectSet *make_project_set(List *tlist, Plan *subplan); static ModifyTable *make_modifytable(PlannerInfo *root, CmdType operation, bool canSetTag, Index nominalRelation, List *partitioned_rels, + bool part_cols_updated, List *resultRelations, List *subplans, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, int epqParam); @@ -2371,6 +2372,7 @@ create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path) best_path->canSetTag, best_path->nominalRelation, best_path->partitioned_rels, + best_path->part_cols_updated, best_path->resultRelations, subplans, best_path->withCheckOptionLists, @@ -6427,6 +6429,7 @@ static ModifyTable * make_modifytable(PlannerInfo *root, CmdType operation, bool canSetTag, Index nominalRelation, List *partitioned_rels, + bool part_cols_updated, List *resultRelations, List *subplans, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, int epqParam) @@ -6453,6 +6456,7 @@ make_modifytable(PlannerInfo *root, node->canSetTag = canSetTag; node->nominalRelation = nominalRelation; node->partitioned_rels = partitioned_rels; + node->part_cols_updated = part_cols_updated; node->resultRelations = resultRelations; node->resultRelIndex = -1; /* will be set correctly in setrefs.c */ node->rootResultRelIndex = -1; /* will be set correctly in setrefs.c */ diff --git a/src/backend/optimizer/plan/planner.c 
b/src/backend/optimizer/plan/planner.c index 9b7a8fd..9a8015e 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -111,6 +111,10 @@ typedef struct /* Local functions */ static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind); static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode); +static void get_all_partition_cols(List *rtables, + Index root_rti, + List *partitioned_rels, + Bitmapset **all_part_cols); static void inheritance_planner(PlannerInfo *root); static void grouping_planner(PlannerInfo *root, bool inheritance_update, double tuple_fraction); @@ -1048,6 +1052,40 @@ preprocess_phv_expression(PlannerInfo *root, Expr *expr) } /* + * get_all_partition_cols + * Get attribute numbers of all partition key columns of all the partitioned + * tables. + * + * All the child partition attribute numbers are converted to the root + * partitioned table. + */ +static void +get_all_partition_cols(List *rtables, + Index root_rti, + List *partitioned_rels, + Bitmapset **all_part_cols) +{ + ListCell *lc; + Oid root_relid = getrelid(root_rti, rtables); + Relation root_rel; + + /* The caller must have already locked all the partitioned tables. */ + root_rel = heap_open(root_relid, NoLock); + *all_part_cols = NULL; + foreach(lc, partitioned_rels) + { + Index rti = lfirst_int(lc); + Oid relid = getrelid(rti, rtables); + Relation part_rel = heap_open(relid, NoLock); + + pull_child_partition_columns(part_rel, root_rel, all_part_cols); + heap_close(part_rel, NoLock); + } + + heap_close(root_rel, NoLock); +} + +/* * inheritance_planner * Generate Paths in the case where the result relation is an * inheritance set. 
@@ -1092,6 +1130,7 @@ inheritance_planner(PlannerInfo *root) Query *parent_parse; Bitmapset *parent_relids = bms_make_singleton(top_parentRTindex); PlannerInfo **parent_roots = NULL; + bool part_cols_updated = false; Assert(parse->commandType != CMD_INSERT); @@ -1162,10 +1201,23 @@ inheritance_planner(PlannerInfo *root) parent_rte = rt_fetch(top_parentRTindex, root->parse->rtable); if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE) { + Bitmapset *all_part_cols = NULL; + nominalRelation = top_parentRTindex; partitioned_rels = get_partitioned_child_rels(root, top_parentRTindex); /* The root partitioned table is included as a child rel */ Assert(list_length(partitioned_rels) >= 1); + + /* + * Retrieve the partition key columns of all the partitioned tables, + * so as to check whether any of the columns being updated is + * a partition key of any of the partition tables. + */ + get_all_partition_cols(root->parse->rtable, top_parentRTindex, + partitioned_rels, &all_part_cols); + + if (bms_overlap(all_part_cols, parent_rte->updatedCols)) + part_cols_updated = true; } /* @@ -1503,6 +1555,7 @@ inheritance_planner(PlannerInfo *root) parse->canSetTag, nominalRelation, partitioned_rels, + part_cols_updated, resultRelations, subpaths, subroots, @@ -2120,6 +2173,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, parse->canSetTag, parse->resultRelation, NIL, + false, list_make1_int(parse->resultRelation), list_make1(path), list_make1(root), diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index 36ec025..3c93952 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -3194,6 +3194,8 @@ create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, * 'partitioned_rels' is an integer list of RT indexes of non-leaf tables in * the partition tree, if this is an UPDATE/DELETE to a partitioned table. * Otherwise NIL. 
+ * 'part_cols_updated' if any partitioning columns are being updated, either + * from the named relation or a descendant partitioned table. * 'resultRelations' is an integer list of actual RT indexes of target rel(s) * 'subpaths' is a list of Path(s) producing source data (one per rel) * 'subroots' is a list of PlannerInfo structs (one per rel) @@ -3207,6 +3209,7 @@ ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, CmdType operation, bool canSetTag, Index nominalRelation, List *partitioned_rels, + bool part_cols_updated, List *resultRelations, List *subpaths, List *subroots, List *withCheckOptionLists, List *returningLists, @@ -3274,6 +3277,7 @@ create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, pathnode->canSetTag = canSetTag; pathnode->nominalRelation = nominalRelation; pathnode->partitioned_rels = list_copy(partitioned_rels); + pathnode->part_cols_updated = part_cols_updated; pathnode->resultRelations = resultRelations; pathnode->subpaths = subpaths; pathnode->subroots = subroots; diff --git a/src/include/catalog/partition.h b/src/include/catalog/partition.h index 8acc01a..6a18d32 100644 --- a/src/include/catalog/partition.h +++ b/src/include/catalog/partition.h @@ -85,8 +85,8 @@ extern void check_new_partition_bound(char *relname, Relation parent, extern Oid get_partition_parent(Oid relid); extern List *get_qual_from_partbound(Relation rel, Relation parent, PartitionBoundSpec *spec); -extern List *map_partition_varattnos(List *expr, int target_varno, - Relation partrel, Relation parent, +extern List *map_partition_varattnos(List *expr, int fromrel_varno, + Relation to_rel, Relation from_rel, bool *found_whole_row); extern List *RelationGetPartitionQual(Relation rel); extern Expr *get_partition_qual_relid(Oid relid); @@ -104,6 +104,9 @@ extern int get_partition_for_tuple(PartitionDispatch *pd, EState *estate, PartitionDispatchData **failed_at, TupleTableSlot **failed_slot); +extern void pull_child_partition_columns(Relation 
rel, + Relation parent, + Bitmapset **partcols); extern Oid get_default_oid_from_partdesc(PartitionDesc partdesc); extern Oid get_default_partition_oid(Oid parentId); extern void update_default_partition_oid(Oid parentId, Oid defaultPartId); diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index c4ecf0d..f39bb8d 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -187,7 +187,10 @@ extern ResultRelInfo *ExecGetTriggerResultRel(EState *estate, Oid relid); extern void ExecCleanUpTriggerState(EState *estate); extern bool ExecContextForcesOids(PlanState *planstate, bool *hasoids); extern void ExecConstraints(ResultRelInfo *resultRelInfo, - TupleTableSlot *slot, EState *estate); + TupleTableSlot *slot, EState *estate, + bool check_partition_constraint); +extern void ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, EState *estate); extern void ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate); extern LockTupleMode ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo); @@ -207,17 +210,22 @@ extern void EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple); extern HeapTuple EvalPlanQualGetTuple(EPQState *epqstate, Index rti); extern void ExecSetupPartitionTupleRouting(Relation rel, + ResultRelInfo *update_rri, + int num_update_rri, Index resultRTindex, EState *estate, PartitionDispatch **pd, ResultRelInfo ***partitions, TupleConversionMap ***tup_conv_maps, + int **subplan_leaf_map, TupleTableSlot **partition_tuple_slot, int *num_parted, int *num_partitions); extern int ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd, TupleTableSlot *slot, EState *estate); +extern bool ExecPartitionCheck(ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, EState *estate); #define EvalPlanQualSetSlot(epqstate, slot) ((epqstate)->origslot = (slot)) extern void EvalPlanQualFetchRowMarks(EPQState 
*epqstate); diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index e05bc04..d2e8060 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -982,15 +982,19 @@ typedef struct ModifyTableState int mt_num_partitions; /* Number of members in the following * arrays */ ResultRelInfo **mt_partitions; /* Per partition result relation pointers */ - TupleConversionMap **mt_partition_tupconv_maps; - /* Per partition tuple conversion map */ TupleTableSlot *mt_partition_tuple_slot; + TupleTableSlot *mt_root_tuple_slot; struct TransitionCaptureState *mt_transition_capture; /* controls transition table population for specified operation */ struct TransitionCaptureState *mt_oc_transition_capture; /* controls transition table population for INSERT...ON CONFLICT UPDATE */ - TupleConversionMap **mt_transition_tupconv_maps; - /* Per plan/partition tuple conversion */ + TupleConversionMap **mt_parentchild_tupconv_maps; + /* Per partition map for tuple conversion from root to leaf */ + TupleConversionMap **mt_childparent_tupconv_maps; + /* Per plan/partition map for tuple conversion from child to root */ + bool mt_is_tupconv_perpart; /* Is the above map per-partition ? 
*/ + int *mt_subplan_partition_offsets; + /* Stores position of update result rels in leaf partitions */ } ModifyTableState; /* ---------------- diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index dd74efa..c414755 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -219,6 +219,7 @@ typedef struct ModifyTable Index nominalRelation; /* Parent RT index for use of EXPLAIN */ /* RT indexes of non-leaf tables in a partition tree */ List *partitioned_rels; + bool part_cols_updated; /* some part col in hierarchy updated */ List *resultRelations; /* integer list of RT indexes */ int resultRelIndex; /* index of first resultRel in plan's list */ int rootResultRelIndex; /* index of the partitioned table root */ diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h index 05fc9a3..30d307d 100644 --- a/src/include/nodes/relation.h +++ b/src/include/nodes/relation.h @@ -1667,6 +1667,7 @@ typedef struct ModifyTablePath Index nominalRelation; /* Parent RT index for use of EXPLAIN */ /* RT indexes of non-leaf tables in a partition tree */ List *partitioned_rels; + bool part_cols_updated; /* some part col in hierarchy updated */ List *resultRelations; /* integer list of RT indexes */ List *subpaths; /* Path(s) producing source data */ List *subroots; /* per-target-table PlannerInfos */ @@ -2109,6 +2110,10 @@ typedef struct AppendRelInfo * The child_rels list must contain at least one element, because the parent * partitioned table is itself counted as a child. * + * all_part_cols contains all attribute numbers from the parent that are + * used as partitioning columns by the parent or some descendant which is + * itself partitioned. + * + * These structs are kept in the PlannerInfo node's pcinfo_list. 
*/ typedef struct PartitionedChildRelInfo diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h index e9ed16a..4b4485f 100644 --- a/src/include/optimizer/pathnode.h +++ b/src/include/optimizer/pathnode.h @@ -238,6 +238,7 @@ extern ModifyTablePath *create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, CmdType operation, bool canSetTag, Index nominalRelation, List *partitioned_rels, + bool part_cols_updated, List *resultRelations, List *subpaths, List *subroots, List *withCheckOptionLists, List *returningLists, diff --git a/src/test/regress/expected/update.out b/src/test/regress/expected/update.out index a4fe961..50b76cf 100644 --- a/src/test/regress/expected/update.out +++ b/src/test/regress/expected/update.out @@ -198,36 +198,367 @@ INSERT INTO upsert_test VALUES (1, 'Bat') ON CONFLICT(a) DROP TABLE update_test; DROP TABLE upsert_test; --- update to a partition should check partition bound constraint for the new tuple -create table range_parted ( +--------------------------- +-- UPDATE with row movement +--------------------------- +-- update to a partition should check partition bound constraint for the new tuple. +-- If partition key is updated, the row should be moved to the appropriate +-- partition. updatable views using partitions should enforce the check options +-- for the rows that have been moved. +create table mintab(c1 int); +insert into mintab values (120); +CREATE TABLE range_parted ( a text, - b int + b bigint, + c numeric, + d int, + e varchar ) partition by range (a, b); -create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10); -create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20); +CREATE VIEW upview AS SELECT * FROM range_parted WHERE (select c > c1 from mintab) WITH CHECK OPTION; +-- Create partitions intentionally in descending bound order, so as to test +-- that update-row-movement works with the leaf partitions not in bound order. 
+create table part_b_20_b_30 (e varchar, c numeric, a text, b bigint, d int); +alter table range_parted attach partition part_b_20_b_30 for values from ('b', 20) to ('b', 30); +create table part_b_10_b_20 (e varchar, c numeric, a text, b bigint, d int) partition by range (c); create table part_b_1_b_10 partition of range_parted for values from ('b', 1) to ('b', 10); -create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20); -insert into part_a_1_a_10 values ('a', 1); -insert into part_b_10_b_20 values ('b', 10); --- fail -update part_a_1_a_10 set a = 'b' where a = 'a'; -ERROR: new row for relation "part_a_1_a_10" violates partition constraint -DETAIL: Failing row contains (b, 1). -update range_parted set b = b - 1 where b = 10; -ERROR: new row for relation "part_b_10_b_20" violates partition constraint -DETAIL: Failing row contains (b, 9). --- ok -update range_parted set b = b + 1 where b = 10; +alter table range_parted attach partition part_b_10_b_20 for values from ('b', 10) to ('b', 20); +create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20); +create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10); +-- This tests partition-key UPDATE on a partitioned table that does not have any child partitions +update part_b_10_b_20 set b = b - 6; +-- As mentioned above, the partition creation is intentionally kept in descending bound order. 
+create table part_c_100_200 (e varchar, c numeric, a text, b bigint, d int) partition by range (d); +create table part_d_1_15 partition of part_c_100_200 for values from (1) to (15); +create table part_d_15_20 partition of part_c_100_200 for values from (15) to (20); +alter table part_b_10_b_20 attach partition part_c_100_200 for values from (100) to (200); +create table part_c_1_100 (e varchar, d int, c numeric, b bigint, a text); +alter table part_b_10_b_20 attach partition part_c_1_100 for values from (1) to (100); +\set init_range_parted 'truncate range_parted; insert into range_parted values (''a'', 1, 1, 1), (''a'', 10, 200, 1), (''b'', 12, 96, 1), (''b'', 13, 97, 2), (''b'', 15, 105, 16), (''b'', 17, 105, 19)' +\set show_data 'select tableoid::regclass::text COLLATE "C" partname, * from range_parted order by 1, 2, 3, 4, 5, 6' +:init_range_parted; +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_d_15_20 | b | 15 | 105 | 16 | + part_d_15_20 | b | 17 | 105 | 19 | +(6 rows) + +-- The order of subplans should be in bound order +explain (costs off) update range_parted set c = c - 50 where c > 97; + QUERY PLAN +------------------------------------- + Update on range_parted + Update on part_a_1_a_10 + Update on part_a_10_a_20 + Update on part_b_1_b_10 + Update on part_c_1_100 + Update on part_d_1_15 + Update on part_d_15_20 + Update on part_b_20_b_30 + -> Seq Scan on part_a_1_a_10 + Filter: (c > '97'::numeric) + -> Seq Scan on part_a_10_a_20 + Filter: (c > '97'::numeric) + -> Seq Scan on part_b_1_b_10 + Filter: (c > '97'::numeric) + -> Seq Scan on part_c_1_100 + Filter: (c > '97'::numeric) + -> Seq Scan on part_d_1_15 + Filter: (c > '97'::numeric) + -> Seq Scan on part_d_15_20 + Filter: (c > '97'::numeric) + -> Seq Scan on part_b_20_b_30 + Filter: (c > '97'::numeric) +(22 rows) + 
+-- fail (row movement happens only within the partition subtree) : +update part_c_100_200 set c = c - 20, d = c where c = 105; +ERROR: new row for relation "part_c_100_200" violates partition constraint +DETAIL: Failing row contains (null, 85, b, 15, 105). +-- fail (no partition key update, so no attempt to move tuple, but "a = 'a'" violates partition constraint enforced by root partition) +update part_b_10_b_20 set a = 'a'; +ERROR: new row for relation "part_c_1_100" violates partition constraint +DETAIL: Failing row contains (null, 1, 96, 12, a). +-- success; partition key update, no constraint violation +update range_parted set d = d - 10 where d > 10; +-- success; no partition key update, no constraint violation +update range_parted set e = d; +-- No row found : +update part_c_1_100 set c = c + 20 where c = 98; +-- ok (row movement) +update part_b_10_b_20 set c = c + 20 returning c, b, a; + c | b | a +-----+----+--- + 116 | 12 | b + 117 | 13 | b + 125 | 15 | b + 125 | 17 | b +(4 rows) + +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+---+--- + part_a_10_a_20 | a | 10 | 200 | 1 | 1 + part_a_1_a_10 | a | 1 | 1 | 1 | 1 + part_d_1_15 | b | 12 | 116 | 1 | 1 + part_d_1_15 | b | 13 | 117 | 2 | 2 + part_d_1_15 | b | 15 | 125 | 6 | 6 + part_d_1_15 | b | 17 | 125 | 9 | 9 +(6 rows) + +-- fail (row movement happens only within the partition subtree) : +update part_b_10_b_20 set b = b - 6 where c > 116 returning *; +ERROR: new row for relation "part_d_1_15" violates partition constraint +DETAIL: Failing row contains (2, 117, b, 7, 2). +-- ok (row movement, with subset of rows moved into different partition) +update range_parted set b = b - 6 where c > 116 returning a, b + c; + a | ?column? 
+---+---------- + a | 204 + b | 124 + b | 134 + b | 136 +(4 rows) + +:show_data; + partname | a | b | c | d | e +---------------+---+----+-----+---+--- + part_a_1_a_10 | a | 1 | 1 | 1 | 1 + part_a_1_a_10 | a | 4 | 200 | 1 | 1 + part_b_1_b_10 | b | 7 | 117 | 2 | 2 + part_b_1_b_10 | b | 9 | 125 | 6 | 6 + part_d_1_15 | b | 11 | 125 | 9 | 9 + part_d_1_15 | b | 12 | 116 | 1 | 1 +(6 rows) + +-- update partition key using updatable view. +-- succeeds +update upview set c = 199 where b = 4; +-- fail, check option violation +update upview set c = 120 where b = 4; +ERROR: new row violates check option for view "upview" +DETAIL: Failing row contains (a, 4, 120, 1, 1). +-- fail, row movement with check option violation +update upview set a = 'b', b = 15, c = 120 where b = 4; +ERROR: new row violates check option for view "upview" +DETAIL: Failing row contains (b, 15, 120, 1, 1). +-- succeeds, row movement , check option passes +update upview set a = 'b', b = 15 where b = 4; +:show_data; + partname | a | b | c | d | e +---------------+---+----+-----+---+--- + part_a_1_a_10 | a | 1 | 1 | 1 | 1 + part_b_1_b_10 | b | 7 | 117 | 2 | 2 + part_b_1_b_10 | b | 9 | 125 | 6 | 6 + part_d_1_15 | b | 11 | 125 | 9 | 9 + part_d_1_15 | b | 12 | 116 | 1 | 1 + part_d_1_15 | b | 15 | 199 | 1 | 1 +(6 rows) + +-- cleanup +drop view upview; +-- RETURNING having whole-row vars. 
+---------------------------------- +:init_range_parted; +update range_parted set c = 95 where a = 'b' and b > 10 and c > 100 returning (range_parted) , *; + range_parted | a | b | c | d | e +---------------+---+----+----+----+--- + (b,15,95,16,) | b | 15 | 95 | 16 | + (b,17,95,19,) | b | 17 | 95 | 19 | +(2 rows) + +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_c_1_100 | b | 15 | 95 | 16 | + part_c_1_100 | b | 17 | 95 | 19 | +(6 rows) + +-- Transition tables with update row movement +--------------------------------------------- +:init_range_parted; +create function trans_updatetrigfunc() returns trigger language plpgsql as +$$ + begin + raise notice 'trigger = %, old table = %, new table = %', + TG_NAME, + (select string_agg(old_table::text, ', ' order by a) from old_table), + (select string_agg(new_table::text, ', ' order by a) from new_table); + return null; + end; +$$; +create trigger trans_updatetrig + after update on range_parted referencing old table as old_table new table as new_table + for each statement execute procedure trans_updatetrigfunc(); +update range_parted set c = (case when c = 96 then 110 else c + 1 end ) where a = 'b' and b > 10 and c >= 96; +NOTICE: trigger = trans_updatetrig, old table = (b,12,96,1,), (b,13,97,2,), (b,15,105,16,), (b,17,105,19,), new table = (b,12,110,1,), (b,13,98,2,), (b,15,106,16,), (b,17,106,19,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 13 | 98 | 2 | + part_d_15_20 | b | 15 | 106 | 16 | + part_d_15_20 | b | 17 | 106 | 19 | + part_d_1_15 | b | 12 | 110 | 1 | +(6 rows) + +:init_range_parted; +-- Enabling OLD TABLE capture for both DELETE as well as UPDATE stmt triggers +-- should not cause DELETEd rows 
to be captured twice. Similar thing for +-- INSERT triggers and inserted rows. +create trigger trans_deletetrig + after delete on range_parted referencing old table as old_table + for each statement execute procedure trans_updatetrigfunc(); +create trigger trans_inserttrig + after insert on range_parted referencing new table as new_table + for each statement execute procedure trans_updatetrigfunc(); +update range_parted set c = c + 50 where a = 'b' and b > 10 and c >= 96; +NOTICE: trigger = trans_updatetrig, old table = (b,12,96,1,), (b,13,97,2,), (b,15,105,16,), (b,17,105,19,), new table = (b,12,146,1,), (b,13,147,2,), (b,15,155,16,), (b,17,155,19,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_d_15_20 | b | 15 | 155 | 16 | + part_d_15_20 | b | 17 | 155 | 19 | + part_d_1_15 | b | 12 | 146 | 1 | + part_d_1_15 | b | 13 | 147 | 2 | +(6 rows) + +drop trigger trans_updatetrig ON range_parted; +drop trigger trans_deletetrig ON range_parted; +drop trigger trans_inserttrig ON range_parted; +-- Install BR triggers on child partition, so that transition tuple conversion takes place. 
+create function func_parted_mod_b() returns trigger as $$ +begin + NEW.b = NEW.b + 1; + return NEW; +end $$ language plpgsql; +create trigger trig_c1_100 before update or insert on part_c_1_100 + for each row execute procedure func_parted_mod_b(); +create trigger trig_d1_15 before update or insert on part_d_1_15 + for each row execute procedure func_parted_mod_b(); +create trigger trig_d15_20 before update or insert on part_d_15_20 + for each row execute procedure func_parted_mod_b(); +:init_range_parted; +update range_parted set c = (case when c = 96 then 110 else c + 1 end ) where a = 'b' and b > 10 and c >= 96; +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 15 | 98 | 2 | + part_d_15_20 | b | 17 | 106 | 16 | + part_d_15_20 | b | 19 | 106 | 19 | + part_d_1_15 | b | 15 | 110 | 1 | +(6 rows) + +:init_range_parted; +update range_parted set c = c + 50 where a = 'b' and b > 10 and c >= 96; +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_d_15_20 | b | 17 | 155 | 16 | + part_d_15_20 | b | 19 | 155 | 19 | + part_d_1_15 | b | 15 | 146 | 1 | + part_d_1_15 | b | 16 | 147 | 2 | +(6 rows) + +drop trigger trig_c1_100 ON part_c_1_100; +drop trigger trig_d1_15 ON part_d_1_15; +drop trigger trig_d15_20 ON part_d_15_20; +drop function func_parted_mod_b(); +-- statement triggers with update row movement +--------------------------------------------------- +:init_range_parted; +create function trigfunc() returns trigger language plpgsql as +$$ + begin + raise notice 'trigger = % fired on table % during %', + TG_NAME, TG_TABLE_NAME, TG_OP; + return null; + end; +$$; +-- Triggers on root partition +create trigger parent_delete_trig + after delete on range_parted for each statement execute procedure trigfunc(); +create trigger 
parent_update_trig + after update on range_parted for each statement execute procedure trigfunc(); +create trigger parent_insert_trig + after insert on range_parted for each statement execute procedure trigfunc(); +-- Triggers on leaf partition part_c_1_100 +create trigger c1_delete_trig + after delete on part_c_1_100 for each statement execute procedure trigfunc(); +create trigger c1_update_trig + after update on part_c_1_100 for each statement execute procedure trigfunc(); +create trigger c1_insert_trig + after insert on part_c_1_100 for each statement execute procedure trigfunc(); +-- Triggers on leaf partition part_d_1_15 +create trigger d1_delete_trig + after delete on part_d_1_15 for each statement execute procedure trigfunc(); +create trigger d1_update_trig + after update on part_d_1_15 for each statement execute procedure trigfunc(); +create trigger d1_insert_trig + after insert on part_d_1_15 for each statement execute procedure trigfunc(); +-- Triggers on leaf partition part_d_15_20 +create trigger d15_delete_trig + after delete on part_d_15_20 for each statement execute procedure trigfunc(); +create trigger d15_update_trig + after update on part_d_15_20 for each statement execute procedure trigfunc(); +create trigger d15_insert_trig + after insert on part_d_15_20 for each statement execute procedure trigfunc(); +-- Move all rows from part_c_100_200 to part_c_1_100. None of the delete or insert statement triggers should be fired. 
+update range_parted set c = c - 50 where c > 97; +NOTICE: trigger = parent_update_trig fired on table range_parted during UPDATE +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 150 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_c_1_100 | b | 15 | 55 | 16 | + part_c_1_100 | b | 17 | 55 | 19 | +(6 rows) + +drop trigger parent_delete_trig ON range_parted; +drop trigger parent_update_trig ON range_parted; +drop trigger parent_insert_trig ON range_parted; +drop trigger c1_delete_trig ON part_c_1_100; +drop trigger c1_update_trig ON part_c_1_100; +drop trigger c1_insert_trig ON part_c_1_100; +drop trigger d1_delete_trig ON part_d_1_15; +drop trigger d1_update_trig ON part_d_1_15; +drop trigger d1_insert_trig ON part_d_1_15; +drop trigger d15_delete_trig ON part_d_15_20; +drop trigger d15_update_trig ON part_d_15_20; +drop trigger d15_insert_trig ON part_d_15_20; +drop table mintab; -- Creating default partition for range +:init_range_parted; create table part_def partition of range_parted default; \d+ part_def - Table "public.part_def" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+----------+--------------+------------- - a | text | | | | extended | | - b | integer | | | | plain | | + Table "public.part_def" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+-------------------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | bigint | | | | plain | | + c | numeric | | | | main | | + d | integer | | | | plain | | + e | character varying | | | | extended | | Partition of: range_parted DEFAULT -Partition constraint: (NOT (((a = 'a'::text) AND (b >= 1) AND (b < 10)) OR ((a = 'a'::text) AND (b >= 10) AND (b < 20)) OR ((a = 'b'::text) AND (b 
>= 1) AND (b < 10)) OR ((a = 'b'::text) AND (b >= 10) AND (b < 20)))) +Partition constraint: (NOT (((a = 'a'::text) AND (b >= '1'::bigint) AND (b < '10'::bigint)) OR ((a = 'a'::text) AND (b >= '10'::bigint) AND (b < '20'::bigint)) OR ((a = 'b'::text) AND (b >= '1'::bigint) AND (b < '10'::bigint)) OR ((a = 'b'::text) AND (b >= '10'::bigint) AND (b < '20'::bigint)) OR ((a = 'b'::text) AND (b >= '20'::bigint) AND (b < '30'::bigint)))) insert into range_parted values ('c', 9); -- ok @@ -235,7 +566,55 @@ update part_def set a = 'd' where a = 'c'; -- fail update part_def set a = 'a' where a = 'd'; ERROR: new row for relation "part_def" violates partition constraint -DETAIL: Failing row contains (a, 9). +DETAIL: Failing row contains (a, 9, null, null, null). +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_d_15_20 | b | 15 | 105 | 16 | + part_d_15_20 | b | 17 | 105 | 19 | + part_def | d | 9 | | | +(7 rows) + +-- Update row movement from non-default to default partition. +-- Fail, default partition is not under part_a_10_a_20; +update part_a_10_a_20 set a = 'ad' where a = 'a'; +ERROR: new row for relation "part_a_10_a_20" violates partition constraint +DETAIL: Failing row contains (ad, 10, 200, 1, null). +-- Success +update range_parted set a = 'ad' where a = 'a'; +update range_parted set a = 'bd' where a = 'b'; +:show_data; + partname | a | b | c | d | e +----------+----+----+-----+----+--- + part_def | ad | 1 | 1 | 1 | + part_def | ad | 10 | 200 | 1 | + part_def | bd | 12 | 96 | 1 | + part_def | bd | 13 | 97 | 2 | + part_def | bd | 15 | 105 | 16 | + part_def | bd | 17 | 105 | 19 | + part_def | d | 9 | | | +(7 rows) + +-- Update row movement from default to non-default partitions. 
+-- Success +update range_parted set a = 'a' where a = 'ad'; +update range_parted set a = 'b' where a = 'bd'; +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_d_15_20 | b | 15 | 105 | 16 | + part_d_15_20 | b | 17 | 105 | 19 | + part_def | d | 9 | | | +(7 rows) + create table list_parted ( a text, b int @@ -250,6 +629,111 @@ ERROR: new row for relation "list_default" violates partition constraint DETAIL: Failing row contains (a, 10). -- ok update list_default set a = 'x' where a = 'd'; +drop table list_parted; +-------------- +-- UPDATE with +-- partition key or non-partition columns, with different column ordering, +-- triggers. +-------------- +-- Setup +-------- +create table list_parted (a numeric, b int, c int8) partition by list (a); +create table sub_parted partition of list_parted for values in (1) partition by list (b); +create table sub_part1(b int, c int8, a numeric); +alter table sub_parted attach partition sub_part1 for values in (1); +create table sub_part2(b int, c int8, a numeric); +alter table sub_parted attach partition sub_part2 for values in (2); +create table list_part1(a numeric, b int, c int8); +alter table list_parted attach partition list_part1 for values in (2,3); +insert into list_parted values (2,5,50); +insert into list_parted values (3,6,60); +insert into sub_parted values (1,1,60); +insert into sub_parted values (1,2,10); +-- Test partition constraint violation when intermediate ancestor is used and +-- constraint is inherited from upper root. +update sub_parted set a = 2 where c = 10; +ERROR: new row for relation "sub_part2" violates partition constraint +DETAIL: Failing row contains (2, 10, 2). +-- UPDATE which does not modify partition key of partitions that are chosen for update. 
+select tableoid::regclass::text , * from list_parted where a = 2 order by 1; + tableoid | a | b | c +------------+---+---+---- + list_part1 | 2 | 5 | 50 +(1 row) + +update list_parted set b = c + a where a = 2; +select tableoid::regclass::text , * from list_parted where a = 2 order by 1; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 +(1 row) + +----------- +-- Triggers can cause UPDATE row movement if it modified partition key. +----------- +create function func_parted_mod_b() returns trigger as $$ +begin + NEW.b = 2; -- This is changing partition key column. + return NEW; +end $$ language plpgsql; +create trigger parted_mod_b before update on sub_part1 + for each row execute procedure func_parted_mod_b(); +select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 + sub_part1 | 1 | 1 | 60 + sub_part2 | 1 | 2 | 10 +(4 rows) + +-- This should do the tuple routing even though there is no explicit +-- partition-key update, because there is a trigger on sub_part1 +update list_parted set c = 70 where b = 1 ; +select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 + sub_part2 | 1 | 2 | 10 + sub_part2 | 1 | 2 | 70 +(4 rows) + +drop trigger parted_mod_b ON sub_part1 ; +-- If BR DELETE trigger prevented DELETE from happening, we should also skip +-- the INSERT if that delete is part of UPDATE=>DELETE+INSERT. 
+create or replace function func_parted_mod_b() returns trigger as $$ +begin return NULL; end $$ language plpgsql; +create trigger trig_skip_delete before delete on sub_part1 + for each row execute procedure func_parted_mod_b(); +update list_parted set b = 1 where c = 70; +select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 + sub_part1 | 1 | 1 | 70 + sub_part2 | 1 | 2 | 10 +(4 rows) + +drop trigger trig_skip_delete ON sub_part1 ; +-- UPDATE partition-key with FROM clause. If join produces multiple output +-- rows for the same row to be modified, we should tuple-route the row only once. +-- There should not be any rows inserted. +create table non_parted (id int); +insert into non_parted values (1), (1), (1), (2), (2), (2), (3), (3), (3); +update list_parted t1 set a = 2 from non_parted t2 where t1.a = t2.id and a = 1; +select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 1 | 70 + list_part1 | 2 | 2 | 10 + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 +(4 rows) + +drop table non_parted; +drop function func_parted_mod_b(); -- create custom operator class and hash function, for the same reason -- explained in alter_table.sql create or replace function dummy_hashint4(a int4, seed int8) returns int8 as @@ -271,9 +755,8 @@ insert into hpart4 values (3, 4); update hpart1 set a = 3, b=4 where a = 1; ERROR: new row for relation "hpart1" violates partition constraint DETAIL: Failing row contains (3, 4). +-- ok : row movement update hash_parted set b = b - 1 where b = 1; -ERROR: new row for relation "hpart1" violates partition constraint -DETAIL: Failing row contains (1, 0). 
-- ok update hash_parted set b = b + 8 where b = 1; -- cleanup diff --git a/src/test/regress/sql/update.sql b/src/test/regress/sql/update.sql index 0c70d64..a07f113 100644 --- a/src/test/regress/sql/update.sql +++ b/src/test/regress/sql/update.sql @@ -107,25 +107,229 @@ INSERT INTO upsert_test VALUES (1, 'Bat') ON CONFLICT(a) DROP TABLE update_test; DROP TABLE upsert_test; --- update to a partition should check partition bound constraint for the new tuple -create table range_parted ( + +--------------------------- +-- UPDATE with row movement +--------------------------- + +-- update to a partition should check partition bound constraint for the new tuple. +-- If partition key is updated, the row should be moved to the appropriate +-- partition. updatable views using partitions should enforce the check options +-- for the rows that have been moved. +create table mintab(c1 int); +insert into mintab values (120); +CREATE TABLE range_parted ( a text, - b int + b bigint, + c numeric, + d int, + e varchar ) partition by range (a, b); -create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10); -create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20); +CREATE VIEW upview AS SELECT * FROM range_parted WHERE (select c > c1 from mintab) WITH CHECK OPTION; + +-- Create partitions intentionally in descending bound order, so as to test +-- that update-row-movement works with the leaf partitions not in bound order. 
+create table part_b_20_b_30 (e varchar, c numeric, a text, b bigint, d int); +alter table range_parted attach partition part_b_20_b_30 for values from ('b', 20) to ('b', 30); +create table part_b_10_b_20 (e varchar, c numeric, a text, b bigint, d int) partition by range (c); create table part_b_1_b_10 partition of range_parted for values from ('b', 1) to ('b', 10); -create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20); -insert into part_a_1_a_10 values ('a', 1); -insert into part_b_10_b_20 values ('b', 10); +alter table range_parted attach partition part_b_10_b_20 for values from ('b', 10) to ('b', 20); +create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20); +create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10); + +-- This tests partition-key UPDATE on a partitioned table that does not have any child partitions +update part_b_10_b_20 set b = b - 6; + +-- As mentioned above, the partition creation is intentionally kept in descending bound order. 
+create table part_c_100_200 (e varchar, c numeric, a text, b bigint, d int) partition by range (d); +create table part_d_1_15 partition of part_c_100_200 for values from (1) to (15); +create table part_d_15_20 partition of part_c_100_200 for values from (15) to (20); + +alter table part_b_10_b_20 attach partition part_c_100_200 for values from (100) to (200); + +create table part_c_1_100 (e varchar, d int, c numeric, b bigint, a text); +alter table part_b_10_b_20 attach partition part_c_1_100 for values from (1) to (100); + +\set init_range_parted 'truncate range_parted; insert into range_parted values (''a'', 1, 1, 1), (''a'', 10, 200, 1), (''b'', 12, 96, 1), (''b'', 13, 97, 2), (''b'', 15, 105, 16), (''b'', 17, 105, 19)' +\set show_data 'select tableoid::regclass::text COLLATE "C" partname, * from range_parted order by 1, 2, 3, 4, 5, 6' +:init_range_parted; +:show_data; + +-- The order of subplans should be in bound order +explain (costs off) update range_parted set c = c - 50 where c > 97; + +-- fail (row movement happens only within the partition subtree) : +update part_c_100_200 set c = c - 20, d = c where c = 105; +-- fail (no partition key update, so no attempt to move tuple, but "a = 'a'" violates partition constraint enforced by root partition) +update part_b_10_b_20 set a = 'a'; +-- success; partition key update, no constraint violation +update range_parted set d = d - 10 where d > 10; +-- success; no partition key update, no constraint violation +update range_parted set e = d; +-- No row found : +update part_c_1_100 set c = c + 20 where c = 98; +-- ok (row movement) +update part_b_10_b_20 set c = c + 20 returning c, b, a; +:show_data; + +-- fail (row movement happens only within the partition subtree) : +update part_b_10_b_20 set b = b - 6 where c > 116 returning *; +-- ok (row movement, with subset of rows moved into different partition) +update range_parted set b = b - 6 where c > 116 returning a, b + c; + +:show_data; + +-- update partition key using 
updatable view. + +-- succeeds +update upview set c = 199 where b = 4; +-- fail, check option violation +update upview set c = 120 where b = 4; +-- fail, row movement with check option violation +update upview set a = 'b', b = 15, c = 120 where b = 4; +-- succeeds, row movement , check option passes +update upview set a = 'b', b = 15 where b = 4; + +:show_data; + +-- cleanup +drop view upview; + +-- RETURNING having whole-row vars. +---------------------------------- +:init_range_parted; +update range_parted set c = 95 where a = 'b' and b > 10 and c > 100 returning (range_parted) , *; +:show_data; + + +-- Transition tables with update row movement +--------------------------------------------- +:init_range_parted; + +create function trans_updatetrigfunc() returns trigger language plpgsql as +$$ + begin + raise notice 'trigger = %, old table = %, new table = %', + TG_NAME, + (select string_agg(old_table::text, ', ' order by a) from old_table), + (select string_agg(new_table::text, ', ' order by a) from new_table); + return null; + end; +$$; + +create trigger trans_updatetrig + after update on range_parted referencing old table as old_table new table as new_table + for each statement execute procedure trans_updatetrigfunc(); + +update range_parted set c = (case when c = 96 then 110 else c + 1 end ) where a = 'b' and b > 10 and c >= 96; +:show_data; +:init_range_parted; + +-- Enabling OLD TABLE capture for both DELETE as well as UPDATE stmt triggers +-- should not cause DELETEd rows to be captured twice. Similar thing for +-- INSERT triggers and inserted rows. 
+create trigger trans_deletetrig
+  after delete on range_parted referencing old table as old_table
+  for each statement execute procedure trans_updatetrigfunc();
+create trigger trans_inserttrig
+  after insert on range_parted referencing new table as new_table
+  for each statement execute procedure trans_updatetrigfunc();
+update range_parted set c = c + 50 where a = 'b' and b > 10 and c >= 96;
+:show_data;
+drop trigger trans_updatetrig ON range_parted;
+drop trigger trans_deletetrig ON range_parted;
+drop trigger trans_inserttrig ON range_parted;
+
+-- Install BR triggers on child partition, so that transition tuple conversion takes place.
+create function func_parted_mod_b() returns trigger as $$
+begin
+   NEW.b = NEW.b + 1;
+   return NEW;
+end $$ language plpgsql;
+create trigger trig_c1_100 before update or insert on part_c_1_100
+  for each row execute procedure func_parted_mod_b();
+create trigger trig_d1_15 before update or insert on part_d_1_15
+  for each row execute procedure func_parted_mod_b();
+create trigger trig_d15_20 before update or insert on part_d_15_20
+  for each row execute procedure func_parted_mod_b();
+:init_range_parted;
+update range_parted set c = (case when c = 96 then 110 else c + 1 end ) where a = 'b' and b > 10 and c >= 96;
+:show_data;
+:init_range_parted;
+update range_parted set c = c + 50 where a = 'b' and b > 10 and c >= 96;
+:show_data;
+drop trigger trig_c1_100 ON part_c_1_100;
+drop trigger trig_d1_15 ON part_d_1_15;
+drop trigger trig_d15_20 ON part_d_15_20;
+drop function func_parted_mod_b();
+
+
+-- statement triggers with update row movement
+---------------------------------------------------
+
+:init_range_parted;
+
+create function trigfunc() returns trigger language plpgsql as
+$$
+  begin
+    raise notice 'trigger = % fired on table % during %',
+                 TG_NAME, TG_TABLE_NAME, TG_OP;
+    return null;
+  end;
+$$;
+-- Triggers on root partition
+create trigger parent_delete_trig
+  after delete on range_parted for each statement execute procedure trigfunc();
+create trigger parent_update_trig
+  after update on range_parted for each statement execute procedure trigfunc();
+create trigger parent_insert_trig
+  after insert on range_parted for each statement execute procedure trigfunc();
+
+-- Triggers on leaf partition part_c_1_100
+create trigger c1_delete_trig
+  after delete on part_c_1_100 for each statement execute procedure trigfunc();
+create trigger c1_update_trig
+  after update on part_c_1_100 for each statement execute procedure trigfunc();
+create trigger c1_insert_trig
+  after insert on part_c_1_100 for each statement execute procedure trigfunc();
+
+-- Triggers on leaf partition part_d_1_15
+create trigger d1_delete_trig
+  after delete on part_d_1_15 for each statement execute procedure trigfunc();
+create trigger d1_update_trig
+  after update on part_d_1_15 for each statement execute procedure trigfunc();
+create trigger d1_insert_trig
+  after insert on part_d_1_15 for each statement execute procedure trigfunc();
+-- Triggers on leaf partition part_d_15_20
+create trigger d15_delete_trig
+  after delete on part_d_15_20 for each statement execute procedure trigfunc();
+create trigger d15_update_trig
+  after update on part_d_15_20 for each statement execute procedure trigfunc();
+create trigger d15_insert_trig
+  after insert on part_d_15_20 for each statement execute procedure trigfunc();
+
+-- Move all rows from part_c_100_200 to part_c_1_100. None of the delete or insert statement triggers should be fired.
+update range_parted set c = c - 50 where c > 97;
+:show_data;
+
+drop trigger parent_delete_trig ON range_parted;
+drop trigger parent_update_trig ON range_parted;
+drop trigger parent_insert_trig ON range_parted;
+drop trigger c1_delete_trig ON part_c_1_100;
+drop trigger c1_update_trig ON part_c_1_100;
+drop trigger c1_insert_trig ON part_c_1_100;
+drop trigger d1_delete_trig ON part_d_1_15;
+drop trigger d1_update_trig ON part_d_1_15;
+drop trigger d1_insert_trig ON part_d_1_15;
+drop trigger d15_delete_trig ON part_d_15_20;
+drop trigger d15_update_trig ON part_d_15_20;
+drop trigger d15_insert_trig ON part_d_15_20;
+
+drop table mintab;
--- fail
-update part_a_1_a_10 set a = 'b' where a = 'a';
-update range_parted set b = b - 1 where b = 10;
--- ok
-update range_parted set b = b + 1 where b = 10;
 -- Creating default partition for range
+:init_range_parted;
 create table part_def partition of range_parted default;
 \d+ part_def
 insert into range_parted values ('c', 9);
@@ -134,6 +338,21 @@ update part_def set a = 'd' where a = 'c';
 -- fail
 update part_def set a = 'a' where a = 'd';
+:show_data;
+
+-- Update row movement from non-default to default partition.
+-- Fail, default partition is not under part_a_10_a_20;
+update part_a_10_a_20 set a = 'ad' where a = 'a';
+-- Success
+update range_parted set a = 'ad' where a = 'a';
+update range_parted set a = 'bd' where a = 'b';
+:show_data;
+-- Update row movement from default to non-default partitions.
+-- Success
+update range_parted set a = 'a' where a = 'ad';
+update range_parted set a = 'b' where a = 'bd';
+:show_data;
+
 create table list_parted (
 	a text,
 	b int
@@ -148,6 +367,84 @@ update list_default set a = 'a' where a = 'd';
 -- ok
 update list_default set a = 'x' where a = 'd';
+drop table list_parted;
+
+--------------
+-- UPDATE with
+-- partition key or non-partition columns, with different column ordering,
+-- triggers.
+--------------
+
+-- Setup
+--------
+create table list_parted (a numeric, b int, c int8) partition by list (a);
+create table sub_parted partition of list_parted for values in (1) partition by list (b);
+
+create table sub_part1(b int, c int8, a numeric);
+alter table sub_parted attach partition sub_part1 for values in (1);
+create table sub_part2(b int, c int8, a numeric);
+alter table sub_parted attach partition sub_part2 for values in (2);
+
+create table list_part1(a numeric, b int, c int8);
+alter table list_parted attach partition list_part1 for values in (2,3);
+
+insert into list_parted values (2,5,50);
+insert into list_parted values (3,6,60);
+insert into sub_parted values (1,1,60);
+insert into sub_parted values (1,2,10);
+
+-- Test partition constraint violation when intermediate ancestor is used and
+-- constraint is inherited from upper root.
+update sub_parted set a = 2 where c = 10;
+
+-- UPDATE which does not modify partition key of partitions that are chosen for update.
+select tableoid::regclass::text , * from list_parted where a = 2 order by 1;
+update list_parted set b = c + a where a = 2;
+select tableoid::regclass::text , * from list_parted where a = 2 order by 1;
+
+
+-----------
+-- Triggers can cause UPDATE row movement if it modified partition key.
+-----------
+create function func_parted_mod_b() returns trigger as $$
+begin
+   NEW.b = 2; -- This is changing partition key column.
+   return NEW;
+end $$ language plpgsql;
+create trigger parted_mod_b before update on sub_part1
+  for each row execute procedure func_parted_mod_b();
+
+select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4;
+
+-- This should do the tuple routing even though there is no explicit
+-- partition-key update, because there is a trigger on sub_part1
+update list_parted set c = 70 where b = 1 ;
+select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4;
+
+drop trigger parted_mod_b ON sub_part1 ;
+
+-- If BR DELETE trigger prevented DELETE from happening, we should also skip
+-- the INSERT if that delete is part of UPDATE=>DELETE+INSERT.
+create or replace function func_parted_mod_b() returns trigger as $$
+begin return NULL; end $$ language plpgsql;
+create trigger trig_skip_delete before delete on sub_part1
+  for each row execute procedure func_parted_mod_b();
+update list_parted set b = 1 where c = 70;
+select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4;
+
+drop trigger trig_skip_delete ON sub_part1 ;
+
+-- UPDATE partition-key with FROM clause. If join produces multiple output
+-- rows for the same row to be modified, we should tuple-route the row only once.
+-- There should not be any rows inserted.
+create table non_parted (id int); +insert into non_parted values (1), (1), (1), (2), (2), (2), (3), (3), (3); +update list_parted t1 set a = 2 from non_parted t2 where t1.a = t2.id and a = 1; +select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4; +drop table non_parted; + +drop function func_parted_mod_b(); + -- create custom operator class and hash function, for the same reason -- explained in alter_table.sql create or replace function dummy_hashint4(a int4, seed int8) returns int8 as @@ -169,6 +466,7 @@ insert into hpart4 values (3, 4); -- fail update hpart1 set a = 3, b=4 where a = 1; +-- ok : row movement update hash_parted set b = b - 1 where b = 1; -- ok update hash_parted set b = b + 8 where b = 1;