set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
						  RangeTblEntry *rte)
{
	/*
	 * The flag has previously been initialized to false, so we can just
	 * return if it becomes clear that we can't safely set it.
	 */
	Assert(!rel->consider_parallel);

	/* Don't call this if parallelism is disallowed for the entire query. */
	Assert(root->glob->parallelModeOK);

	/* This should only be called for baserels and appendrel children. */
	Assert(IS_SIMPLE_REL(rel));

	/*
	 * Assorted checks based on rtekind.
	 *
	 * NOTE(review): there is deliberately no default: case here — presumably
	 * so the compiler can warn if a new RTEKind value goes unhandled; confirm
	 * against project convention before adding one.
	 */
	switch (rte->rtekind)
	{
		case RTE_RELATION:

			/*
			 * Currently, parallel workers can't access the leader's temporary
			 * tables.  We could possibly relax this if we wrote all of its
			 * local buffers at the start of the query and made no changes
			 * thereafter (maybe we could allow hint bit changes), and if we
			 * taught the workers to read them.  Writing a large number of
			 * temporary buffers could be expensive, though, and we don't have
			 * the rest of the necessary infrastructure right now anyway.  So
			 * for now, bail out if we see a temporary table.
			 */
			if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
				return;

			/*
			 * Table sampling can be pushed down to workers if the sample
			 * function and its arguments are safe.
			 */
			if (rte->tablesample != NULL)
			{
				char		proparallel = func_parallel(rte->tablesample->tsmhandler);

				if (proparallel != PROPARALLEL_SAFE)
					return;
				if (!is_parallel_safe(root, (Node *) rte->tablesample->args))
					return;
			}

			/*
			 * Ask FDWs whether they can support performing a ForeignScan
			 * within a worker.  Most often, the answer will be no.  For
			 * example, if the nature of the FDW is such that it opens a TCP
			 * connection with a remote server, each parallel worker would end
			 * up with a separate connection, and these connections might not
			 * be appropriately coordinated between workers and the leader.
			 */
			if (rte->relkind == RELKIND_FOREIGN_TABLE)
			{
				Assert(rel->fdwroutine);
				if (!rel->fdwroutine->IsForeignScanParallelSafe)
					return;
				if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
					return;
			}

			/*
			 * There are additional considerations for appendrels, which we'll
			 * deal with in set_append_rel_size and set_append_rel_pathlist.
			 * For now, just set consider_parallel based on the rel's own
			 * quals and targetlist.
			 */
			break;

		case RTE_SUBQUERY:

			/*
			 * There's no intrinsic problem with scanning a subquery-in-FROM
			 * (as distinct from a SubPlan or InitPlan) in a parallel worker.
			 * If the subquery doesn't happen to have any parallel-safe paths,
			 * then flagging it as consider_parallel won't change anything,
			 * but that's true for plain tables, too.  We must set
			 * consider_parallel based on the rel's own quals and targetlist,
			 * so that if a subquery path is parallel-safe but the quals and
			 * projection we're sticking onto it are not, we correctly mark
			 * the SubqueryScanPath as not parallel-safe.  (Note that
			 * set_subquery_pathlist() might push some of these quals down
			 * into the subquery itself, but that doesn't change anything.)
			 *
			 * We can't push sub-select containing LIMIT/OFFSET to workers as
			 * there is no guarantee that the row order will be fully
			 * deterministic, and applying LIMIT/OFFSET will lead to
			 * inconsistent results at the top-level.  (In some cases, where
			 * the result is ordered, we could relax this restriction.  But it
			 * doesn't currently seem worth expending extra effort to do so.)
			 */
			{
				Query	   *subquery = castNode(Query, rte->subquery);

				if (limit_needed(subquery))
					return;
			}
			break;

		case RTE_JOIN:
			/* Shouldn't happen; we're only considering baserels here. */
			Assert(false);
			return;

		case RTE_FUNCTION:
			/* Check for parallel-restricted functions. */
			if (!is_parallel_safe(root, (Node *) rte->functions))
				return;
			break;

		case RTE_TABLEFUNC:
			/* not parallel safe */
			return;

		case RTE_VALUES:
			/* Check for parallel-restricted functions. */
			if (!is_parallel_safe(root, (Node *) rte->values_lists))
				return;
			break;

		case RTE_CTE:

			/*
			 * CTE tuplestores aren't shared among parallel workers, so we
			 * force all CTE scans to happen in the leader.  Also, populating
			 * the CTE would require executing a subplan that's not available
			 * in the worker, might be parallel-restricted, and must get
			 * executed only once.
			 */
			return;

		case RTE_NAMEDTUPLESTORE:

			/*
			 * tuplestore cannot be shared, at least without more
			 * infrastructure to support that.
			 */
			return;

		case RTE_RESULT:
			/* RESULT RTEs, in themselves, are no problem. */
			break;

		case RTE_GRAPH_TABLE:

			/*
			 * Shouldn't happen since these are replaced by subquery RTEs when
			 * rewriting queries.
			 */
			Assert(false);
			return;

		case RTE_GROUP:
			/* Shouldn't happen; we're only considering baserels here. */
			Assert(false);
			return;
	}

	/*
	 * If there's anything in baserestrictinfo that's parallel-restricted, we
	 * give up on parallelizing access to this relation.  We could consider
	 * instead postponing application of the restricted quals until we're
	 * above all the parallelism in the plan tree, but it's not clear that
	 * that would be a win in very many cases, and it might be tricky to make
	 * outer join clauses work correctly.  It would likely break equivalence
	 * classes, too.
	 */
	if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo))
		return;

	/*
	 * Likewise, if the relation's outputs are not parallel-safe, give up.
	 * (Usually, they're just Vars, but sometimes they're not.)
	 */
	if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs))
		return;

	/* We have a winner. */
	rel->consider_parallel = true;
}