diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py index f6fa65e..b4d99d7 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py @@ -11,7 +11,6 @@ import simplejson as json import re -from functools import wraps import pgadmin.browser.server_groups.servers.databases as database from flask import render_template, request, jsonify @@ -21,12 +20,11 @@ from pgadmin.browser.server_groups.servers.databases.schemas.utils \ trigger_definition, parse_rule_definition from pgadmin.browser.server_groups.servers.utils import parse_priv_from_db, \ parse_priv_to_db -from pgadmin.browser.utils import PGChildNodeView from pgadmin.utils.ajax import make_json_response, internal_server_error, \ make_response as ajax_response, gone from pgadmin.utils.driver import get_driver - from config import PG_DEFAULT_DRIVER +from .utils import BaseTableView class TableModule(SchemaChildModule): @@ -83,7 +81,7 @@ class TableModule(SchemaChildModule): blueprint = TableModule(__name__) -class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): +class TableView(BaseTableView, DataTypeReader, VacuumSettings): """ This class is responsible for generating routes for Table node @@ -96,11 +94,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): - This property defines (if javascript) exists for this node. Override this property for your own logic - * check_precondition() - - This function will behave as a decorator which will checks - database connection before running view, it will also attaches - manager,conn & template_path properties to self - * list() - This function is used to list all the Table nodes within that collection. 
@@ -149,10 +142,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): - This function will generate dependent list to show it in dependent pane for the selected node. - * _formatter(data, tid) - - It will return formatted output of query result - as per client model format - * get_types(self, gid, sid, did, scid) - This function will return list of types available for columns node via AJAX response @@ -178,19 +167,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): * get_toast_table_vacuum(gid, sid, did, scid=None, tid=None) - Fetch the default values for toast table auto-vacuum - * _columns_formatter(tid, data): - - It will return formatted output of query result - as per client model format for column node - - * _index_constraints_formatter(self, did, tid, data): - - It will return formatted output of query result - as per client model format for index constraint node - - * _cltype_formatter(type): (staticmethod) - - We need to remove [] from type and append it - after length/precision so we will send flag for - sql template - * _parse_format_columns(self, data, mode=None): - This function will parse and return formatted list of columns added by user @@ -252,71 +228,14 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): 'get_access_methods': [{}, {'get': 'get_access_methods'}], 'get_oper_class': [{}, {'get': 'get_oper_class'}], 'get_operator': [{}, {'get': 'get_operator'}], + 'get_attach_tables': [{'get': 'get_attach_tables'},{'get': 'get_attach_tables'}], 'select_sql': [{'get': 'select_sql'}], 'insert_sql': [{'get': 'insert_sql'}], 'update_sql': [{'get': 'update_sql'}], 'delete_sql': [{'get': 'delete_sql'}] - }) - def check_precondition(f): - """ - This function will behave as a decorator which will checks - database connection before running view, it will also attaches - manager,conn & template_path properties to self - """ - - @wraps(f) - def wrap(*args, **kwargs): - # Here args[0] will hold self & kwargs 
will hold gid,sid,did - self = args[0] - driver = get_driver(PG_DEFAULT_DRIVER) - did = kwargs['did'] - self.manager = driver.connection_manager(kwargs['sid']) - self.conn = self.manager.connection(did=kwargs['did']) - self.qtIdent = driver.qtIdent - self.qtTypeIdent = driver.qtTypeIdent - # We need datlastsysoid to check if current table is system table - self.datlastsysoid = self.manager.db_info[ - did - ]['datlastsysoid'] if self.manager.db_info is not None and \ - did in self.manager.db_info else 0 - - ver = self.manager.version - # Set the template path for the SQL scripts - self.template_path = 'table/sql/#{0}#'.format(ver) - - # Template for Column ,check constraint and exclusion constraint node - self.column_template_path = 'column/sql/#{0}#'.format(ver) - self.check_constraint_template_path = 'check_constraint/sql/#{0}#'.format(ver) - self.exclusion_constraint_template_path = 'exclusion_constraint/sql/#{0}#'.format(ver) - - # Template for PK & Unique constraint node - self.index_constraint_template_path = 'index_constraint/sql' - - # Template for foreign key constraint node - self.foreign_key_template_path = 'foreign_key/sql/#{0}#'.format(ver) - - # Template for index node - self.index_template_path = 'index/sql/#{0}#'.format(ver) - - # Template for trigger node - self.trigger_template_path = 'trigger/sql/#{0}#'.format(ver) - - # Template for rules node - self.rules_template_path = 'rules/sql' - - # Supported ACL for table - self.acl = ['a', 'r', 'w', 'd', 'D', 'x', 't'] - - # Supported ACL for columns - self.column_acl = ['a', 'r', 'w', 'x'] - - return f(*args, **kwargs) - - return wrap - - @check_precondition + @BaseTableView.check_precondition def list(self, gid, sid, did, scid): """ This function is used to list all the table nodes within that collection. 
@@ -330,7 +249,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns: JSON of available table nodes """ - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'properties.sql']), did=did, scid=scid, datlastsysoid=self.datlastsysoid) @@ -343,7 +262,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def node(self, gid, sid, did, scid, tid): """ This function is used to list all the table nodes within that collection. @@ -359,7 +278,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): JSON of available table nodes """ res = [] - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'nodes.sql']), scid=scid, tid=tid) status, rset = self.conn.execute_2darray(SQL) @@ -374,7 +293,8 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): rset['rows'][0]['name'], icon="icon-table", tigger_count=rset['rows'][0]['triggercount'], - has_enable_triggers=rset['rows'][0]['has_enable_triggers'] + has_enable_triggers=rset['rows'][0]['has_enable_triggers'], + is_partitioned=rset['rows'][0]['is_partitioned'] if 'is_partitioned' in rset['rows'][0] else False ) return make_json_response( @@ -382,8 +302,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - - @check_precondition + @BaseTableView.check_precondition def nodes(self, gid, sid, did, scid): """ This function is used to list all the table nodes within that collection. 
@@ -398,7 +317,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): JSON of available table nodes """ res = [] - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'nodes.sql']), scid=scid) status, rset = self.conn.execute_2darray(SQL) @@ -413,7 +332,8 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): row['name'], icon="icon-table", tigger_count=row['triggercount'], - has_enable_triggers=row['has_enable_triggers'] + has_enable_triggers=row['has_enable_triggers'], + is_partitioned=row['is_partitioned'] if 'is_partitioned' in row else False )) return make_json_response( @@ -421,7 +341,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def get_all_tables(self, gid, sid, did, scid, tid=None): """ Args: @@ -435,7 +355,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns the lits of tables required for constraints. 
""" try: - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'get_tables_for_constraints.sql']), show_sysobj=self.blueprint.show_system_objects) @@ -452,7 +372,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def get_table_vacuum(self, gid, sid, did, scid=None, tid=None): """ Fetch the default values for table auto-vacuum @@ -468,7 +388,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def get_toast_table_vacuum(self, gid, sid, did, scid=None, tid=None): """ Fetch the default values for toast table auto-vacuum @@ -484,7 +404,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def get_access_methods(self, gid, sid, did, scid, tid=None): """ This function returns access methods. 
@@ -518,7 +438,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def get_oper_class(self, gid, sid, did, scid, tid=None): """ @@ -555,7 +475,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def get_operator(self, gid, sid, did, scid, tid=None): """ @@ -593,339 +513,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - def _columns_formatter(self, tid, data): - """ - Args: - tid: Table OID - data: dict of query result - - Returns: - It will return formatted output of query result - as per client model format for column node - """ - for column in data['columns']: - - # We need to format variables according to client js collection - if 'attoptions' in column and column['attoptions'] is not None: - spcoptions = [] - for spcoption in column['attoptions']: - k, v = spcoption.split('=') - spcoptions.append({'name': k, 'value': v}) - - column['attoptions'] = spcoptions - - # Need to format security labels according to client js collection - if 'seclabels' in column and column['seclabels'] is not None: - seclabels = [] - for seclbls in column['seclabels']: - k, v = seclbls.split('=') - seclabels.append({'provider': k, 'label': v}) - - column['seclabels'] = seclabels - - if 'attnum' in column and column['attnum'] is not None and \ - column['attnum'] > 0: - # We need to parse & convert ACL coming from database to json format - SQL = render_template("/".join([self.column_template_path, 'acl.sql']), - tid=tid, clid=column['attnum']) - status, acl = self.conn.execute_dict(SQL) - - if not status: - return internal_server_error(errormsg=acl) - - # We will set get privileges from acl sql so we don't need - # it from properties sql - column['attacl'] = [] - - 
for row in acl['rows']: - priv = parse_priv_from_db(row) - column.setdefault(row['deftype'], []).append(priv) - - # we are receiving request when in edit mode - # we will send filtered types related to current type - present_type = column['cltype'] - - type_id = column['atttypid'] - - fulltype = self.get_full_type( - column['typnspname'], column['typname'], - column['isdup'], column['attndims'], column['atttypmod'] - ) - - length = False - precision = False - if 'elemoid' in column: - length, precision, typeval = self.get_length_precision(column['elemoid']) - - # Set length and precision to None - column['attlen'] = None - column['attprecision'] = None - - # If we have length & precision both - if length and precision: - matchObj = re.search(r'(\d+),(\d+)', fulltype) - if matchObj: - column['attlen'] = matchObj.group(1) - column['attprecision'] = matchObj.group(2) - elif length: - # If we have length only - matchObj = re.search(r'(\d+)', fulltype) - if matchObj: - column['attlen'] = matchObj.group(1) - column['attprecision'] = None - - - SQL = render_template("/".join([self.column_template_path, - 'is_referenced.sql']), - tid=tid, clid=column['attnum']) - - status, is_reference = self.conn.execute_scalar(SQL) - - edit_types_list = list() - # We will need present type in edit mode - - if column['typnspname'] == "pg_catalog" or column['typnspname'] == "public": - edit_types_list.append(present_type) - else: - t = self.qtTypeIdent(self.conn, column['typnspname'], present_type) - edit_types_list.append(t) - column['cltype'] = t - - if int(is_reference) == 0: - SQL = render_template("/".join([self.column_template_path, - 'edit_mode_types.sql']), - type_id=type_id) - status, rset = self.conn.execute_2darray(SQL) - - for row in rset['rows']: - edit_types_list.append(row['typname']) - else: - edit_types_list.append(present_type) - - column['edit_types'] = edit_types_list - column['cltype'] = DataTypeReader.parse_type_name(column['cltype']) - - if 'indkey' in column: - # 
Current column - attnum = str(column['attnum']) - - # Single/List of primary key column(s) - indkey = str(column['indkey']) - - # We will check if column is in primary column(s) - if attnum in indkey.split(" "): - column['is_primary_key'] = True - else: - column['is_primary_key'] = False - - return data - - def _index_constraints_formatter(self, did, tid, data): - """ - Args: - tid: Table OID - data: dict of query result - - Returns: - It will return formatted output of query result - as per client model format for index constraint node - """ - - # We will fetch all the index constraints for the table - index_constraints = { - 'p': 'primary_key', 'u': 'unique_constraint' - } - - for ctype in index_constraints.keys(): - data[index_constraints[ctype]] = [] - - sql = render_template("/".join([self.index_constraint_template_path, - 'properties.sql']), - did=did, tid=tid, - constraint_type=ctype) - status, res = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=res) - - for row in res['rows']: - result = row - sql = render_template( - "/".join([self.index_constraint_template_path, - 'get_constraint_cols.sql']), - cid=row['oid'], - colcnt=row['indnatts']) - status, res = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=res) - - columns = [] - for r in res['rows']: - columns.append({"column": r['column'].strip('"')}) - - result['columns'] = columns - - # If not exists then create list and/or append into - # existing list [ Adding into main data dict] - data.setdefault(index_constraints[ctype], []).append(result) - - return data - - def _foreign_key_formatter(self, tid, data): - """ - Args: - tid: Table OID - data: dict of query result - - Returns: - It will return formatted output of query result - as per client model format for foreign key constraint node - """ - - # We will fetch all the index constraints for the table - sql = render_template("/".join([self.foreign_key_template_path, - 
'properties.sql']), - tid=tid) - - status, result = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=result) - - for fk in result['rows']: - - sql = render_template("/".join([self.foreign_key_template_path, - 'get_constraint_cols.sql']), - tid=tid, - keys=zip(fk['confkey'], fk['conkey']), - confrelid=fk['confrelid']) - - status, res = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=res) - - columns = [] - cols = [] - for row in res['rows']: - columns.append({"local_column": row['conattname'], - "references": fk['confrelid'], - "referenced": row['confattname']}) - cols.append(row['conattname']) - - fk['columns'] = columns - - SQL = render_template("/".join([self.foreign_key_template_path, - 'get_parent.sql']), - tid=fk['columns'][0]['references']) - - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - fk['remote_schema'] = rset['rows'][0]['schema'] - fk['remote_table'] = rset['rows'][0]['table'] - - coveringindex = self.search_coveringindex(tid, cols) - - fk['coveringindex'] = coveringindex - if coveringindex: - fk['autoindex'] = True - fk['hasindex'] = True - else: - fk['autoindex'] = False - fk['hasindex'] = False - # If not exists then create list and/or append into - # existing list [ Adding into main data dict] - data.setdefault('foreign_key', []).append(fk) - - return data - - def _check_constraint_formatter(self, tid, data): - """ - Args: - tid: Table OID - data: dict of query result - - Returns: - It will return formatted output of query result - as per client model format for check constraint node - """ - - # We will fetch all the index constraints for the table - SQL = render_template("/".join([self.check_constraint_template_path, - 'properties.sql']), - tid=tid) - - status, res = self.conn.execute_dict(SQL) - - if not status: - return internal_server_error(errormsg=res) - # If not exists then create list and/or append 
into - # existing list [ Adding into main data dict] - - data['check_constraint'] = res['rows'] - - return data - - def _exclusion_constraint_formatter(self, did, tid, data): - """ - Args: - tid: Table OID - data: dict of query result - - Returns: - It will return formatted output of query result - as per client model format for exclusion constraint node - """ - - # We will fetch all the index constraints for the table - sql = render_template("/".join([self.exclusion_constraint_template_path, - 'properties.sql']), - did=did, tid=tid) - - status, result = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=result) - - for ex in result['rows']: - - sql = render_template("/".join([self.exclusion_constraint_template_path, - 'get_constraint_cols.sql']), - cid=ex['oid'], - colcnt=ex['indnatts']) - - status, res = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=res) - - columns = [] - for row in res['rows']: - if row['options'] & 1: - order = False - nulls_order = True if (row['options'] & 2) else False - else: - order = True - nulls_order = True if (row['options'] & 2) else False - - columns.append({"column": row['coldef'].strip('"'), - "oper_class": row['opcname'], - "order": order, - "nulls_order": nulls_order, - "operator": row['oprname'], - "col_type": row['datatype'] - }) - - ex['columns'] = columns - # If not exists then create list and/or append into - # existing list [ Adding into main data dict] - data.setdefault('exclude_constraint', []).append(ex) - - return data - def search_coveringindex(self, tid, cols): """ @@ -966,133 +553,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return None - def _formatter(self, did, scid, tid, data): - """ - Args: - data: dict of query result - scid: schema oid - tid: table oid - - Returns: - It will return formatted output of query result - as per client model format - """ - # Need to format security labels according to client js 
collection - if 'seclabels' in data and data['seclabels'] is not None: - seclabels = [] - for seclbls in data['seclabels']: - k, v = seclbls.split('=') - seclabels.append({'provider': k, 'label': v}) - - data['seclabels'] = seclabels - - # We need to parse & convert ACL coming from database to json format - SQL = render_template("/".join([self.template_path, 'acl.sql']), - tid=tid, scid=scid) - status, acl = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=acl) - - # We will set get privileges from acl sql so we don't need - # it from properties sql - for row in acl['rows']: - priv = parse_priv_from_db(row) - if row['deftype'] in data: - data[row['deftype']].append(priv) - else: - data[row['deftype']] = [priv] - - # We will add Auto vacuum defaults with out result for grid - data['vacuum_table'] = self.parse_vacuum_data(self.conn, data, 'table') - data['vacuum_toast'] = self.parse_vacuum_data(self.conn, data, 'toast') - - # Fetch columns for the table logic - # - # 1) Check if of_type and inherited tables are present? 
- # 2) If yes then Fetch all the columns for of_type and inherited tables - # 3) Add columns in columns collection - # 4) Find all the columns for tables and filter out columns which are - # not inherited from any table & format them one by one - - # Get of_type table columns and add it into columns dict - if data['typname']: - SQL = render_template("/".join([self.template_path, - 'get_columns_for_table.sql']), - tname=data['typname']) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - data['columns'] = res['rows'] - - # Get inherited table(s) columns and add it into columns dict - elif data['coll_inherits'] and len(data['coll_inherits']) > 0: - columns = [] - # Return all tables which can be inherited & do not show - # system columns - SQL = render_template("/".join([self.template_path, 'get_inherits.sql']), - show_system_objects=False - ) - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - for row in rset['rows']: - if row['inherits'] in data['coll_inherits']: - # Fetch columns using inherited table OID - SQL = render_template("/".join([self.template_path, - 'get_columns_for_table.sql']), - tid=row['oid']) - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - columns.extend(res['rows'][:]) - data['columns'] = columns - - # We will fetch all the columns for the table using - # columns properties.sql, so we need to set template path - SQL = render_template("/".join([self.column_template_path, - 'properties.sql']), - tid=tid, - show_sys_objects=False - ) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - all_columns = res['rows'] - - # Filter inherited columns from all columns - if 'columns' in data and len(data['columns']) > 0 \ - and len(all_columns) > 0: - for row in data['columns']: - for i, col in enumerate(all_columns): - # If 
both name are same then remove it - # as it is inherited from other table - if col['name'] == row['name']: - # Remove same column from all_columns as - # already have it columns collection - del all_columns[i] - - # If any column is added then update columns collection - if len(all_columns) > 0: - data['columns'] += all_columns - # If no inherited columns found then add all columns - elif len(all_columns) > 0: - data['columns'] = all_columns - - if 'columns' in data and len(data['columns']) > 0: - data = self._columns_formatter(tid, data) - - # Here we will add constraint in our output - data = self._index_constraints_formatter(did, tid, data) - data = self._foreign_key_formatter(tid, data) - data = self._check_constraint_formatter(tid, data) - data = self._exclusion_constraint_formatter(did, tid, data) - - return data - - @check_precondition + @BaseTableView.check_precondition def properties(self, gid, sid, did, scid, tid): """ This function will show the properties of the selected table node. @@ -1109,7 +570,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): JSON of selected table node """ - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'properties.sql']), did=did, scid=scid, tid=tid, datlastsysoid=self.datlastsysoid) @@ -1140,19 +601,60 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): data = self._formatter(did, scid, tid, data) + # Fetch partition of this table if it is partitioned table. 
+ if 'is_partitioned' in data and data['is_partitioned']: + # get the partition type + data['partition_type'] = data['partition_scheme'].split()[0].lower() + + partitions = [] + SQL = render_template("/".join([self.partition_template_path, + 'nodes.sql']), + scid=scid, tid=tid) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + for row in rset['rows']: + partition_name = row['name'] + # if schema name is different then display schema + # qualified name on UI. + if data['schema'] != row['schema_name']: + partition_name = row['schema_name'] + '.' + row['name'] + + range_part = row['partition_value'].replace(', ', ',').split() + if data['partition_type'] == 'range': + range_from = range_part[3].replace('(', '').replace(')', '') + range_to = range_part[5].replace('(', '').replace(')', '') + + partitions.append({ + 'oid': row['oid'], + 'partition_name': partition_name, + 'values_from': range_from, + 'values_to': range_to + }) + else: + range_in = range_part[3].replace('(', '').replace(')', '') + partitions.append({ + 'oid': row['oid'], + 'partition_name': partition_name, + 'values_in': range_in + }) + + data['partitions'] = partitions + return ajax_response( response=data, status=200 ) - @check_precondition + @BaseTableView.check_precondition def types(self, gid, sid, did, scid, tid=None, clid=None): """ Returns: This function will return list of types available for column node for node-ajax-control """ - condition = render_template("/".join([self.template_path, + condition = render_template("/".join([self.table_template_path, 'get_types_where_condition.sql']), show_system_objects=self.blueprint.show_system_objects) @@ -1166,7 +668,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def get_columns(self, gid, sid, did, scid, tid=None): """ Returns the Table Columns. 
@@ -1189,11 +691,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): data = request.args if request.args else None try: if data and 'tid' in data: - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'get_columns_for_table.sql']), tid=data['tid']) elif data and 'tname' in data: - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'get_columns_for_table.sql']), tname=data['tname']) @@ -1211,7 +713,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def get_oftype(self, gid, sid, did, scid, tid=None): """ Returns: @@ -1220,7 +722,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): """ res = [{'label': '', 'value': ''}] try: - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'get_oftype.sql']), scid=scid, server_type=self.manager.server_type, show_sys_objects=self.blueprint.show_system_objects) @@ -1241,7 +743,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def get_inherits(self, gid, sid, did, scid, tid=None): """ Returns: @@ -1250,7 +752,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): """ try: res = [] - SQL = render_template("/".join([self.template_path, 'get_inherits.sql']), + SQL = render_template("/".join([self.table_template_path, 'get_inherits.sql']), show_system_objects=self.blueprint.show_system_objects, tid=tid, server_type=self.manager.server_type) @@ -1271,7 +773,38 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition 
+ @BaseTableView.check_precondition + def get_attach_tables(self, gid, sid, did, scid, tid=None): + """ + Returns: + This function will return list of tables available to be attached + to the partitioned table. + """ + try: + res = [] + SQL = render_template("/".join( + [self.partition_template_path, 'get_attach_tables.sql']), + tid=tid + ) + + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=res) + + for row in rset['rows']: + res.append( + {'label': row['table_name'], 'value': row['oid']} + ) + + return make_json_response( + data=res, + status=200 + ) + + except Exception as e: + return internal_server_error(errormsg=str(e)) + + @BaseTableView.check_precondition def get_relations(self, gid, sid, did, scid, tid=None): """ Returns: @@ -1280,7 +813,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): """ res = [{'label': '', 'value': ''}] try: - SQL = render_template("/".join([self.template_path, 'get_relations.sql']), + SQL = render_template("/".join([self.table_template_path, 'get_relations.sql']), show_sys_objects=self.blueprint.show_system_objects, server_type=self.manager.server_type) status, rset = self.conn.execute_2darray(SQL) @@ -1299,23 +832,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return internal_server_error(errormsg=str(e)) @staticmethod - def _cltype_formatter(data_type): - """ - - Args: - data_type: Type string - - Returns: - We need to remove [] from type and append it - after length/precision so we will send flag for - sql template - """ - if '[]' in data_type: - return data_type[:-2], True - else: - return data_type, False - - @staticmethod def convert_length_precision_to_string(data): """ This function is used to convert length & precision to string @@ -1409,8 +925,95 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): data['name'] = str(data['name']) return data + def get_schema_and_table_name(self, tid): + """ + This function 
will fetch the schema qualified name of the + given table id. + + :param tid: Table Id. + """ + # Get schema oid + status, scid = self.conn.execute_scalar( + render_template("/".join([self.table_template_path, + 'get_schema_oid.sql']), tid=tid)) + if not status: + return internal_server_error(errormsg=scid) + + # Fetch schema name + status, schema_name = self.conn.execute_scalar( + render_template("/".join([self.table_template_path, + 'get_schema.sql']), conn=self.conn, + scid=scid) + ) + if not status: + return internal_server_error(errormsg=schema_name) + + # Fetch Table name + status, table_name = self.conn.execute_scalar( + render_template( + "/".join([self.table_template_path, 'get_table.sql']), + conn=self.conn, scid=scid, tid=tid + ) + ) + if not status: + return internal_server_error(errormsg=table_name) + + return schema_name, table_name - @check_precondition + def get_partitions_sql(self, partitions): + """ + This function will iterate all the partitions and create SQL. + + :param partitions: List of partitions + """ + sql = '' + + for row in partitions['partitions']: + part_data = dict() + part_data['partitioned_table_name'] = partitions['name'] + part_data['parent_schema'] = partitions['schema'] + + if 'is_attach' in row and row['is_attach']: + schema_name, table_name = \ + self.get_schema_and_table_name(row['partition_name']) + + part_data['schema'] = schema_name + part_data['name'] = table_name + else: + part_data['schema'] = partitions['schema'] + part_data['relispartition'] = True + part_data['name'] = row['partition_name'] + + if partitions['partition_type'] == 'range': + range_from = row['values_from'].split(',') + range_to = row['values_to'].split(',') + + from_str = ', '.join("'{0}'".format(item) for item in range_from) + to_str = ', '.join("'{0}'".format(item) for item in range_to) + + part_data['partition_value'] = 'FOR VALUES FROM (' + from_str \ + + ') TO (' + to_str + ')' + else: + range_in = row['values_in'].split(',') + in_str = ', 
'.join("'{0}'".format(item) for item in range_in) + part_data['partition_value'] = 'FOR VALUES IN (' + in_str + ')' + + if 'is_attach' in row and row['is_attach']: + partition_sql = render_template( + "/".join([self.partition_template_path, 'attach.sql']), + data=part_data, conn=self.conn + ) + else: + partition_sql = render_template( + "/".join([self.partition_template_path, 'create.sql']), + data=part_data, conn=self.conn + ) + + sql += partition_sql + '\n' + + return sql + + @BaseTableView.check_precondition def create(self, gid, sid, did, scid): """ This function will creates new the table object @@ -1474,9 +1077,22 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): c['remote_table'] = rset['rows'][0]['table'] try: - SQL = render_template("/".join([self.template_path, + partitions_sql = '' + partitioned = False + if 'is_partitioned' in data and data['is_partitioned']: + data['relkind'] = 'p' + # create partition scheme + data['partition_scheme'] = self.get_partition_scheme(data) + partitions_sql = self.get_partitions_sql(data) + partitioned = True + + SQL = render_template("/".join([self.table_template_path, 'create.sql']), data=data, conn=self.conn) + + # Append SQL for partitions + SQL += '\n' + partitions_sql + status, res = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=res) @@ -1490,7 +1106,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): data['name'] = data['name'][0:CONST_MAX_CHAR_COUNT] # Get updated schema oid - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'get_schema_oid.sql']), tname=data['name']) status, scid = self.conn.execute_scalar(SQL) @@ -1498,7 +1114,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return internal_server_error(errormsg=scid) # we need oid to to add object in tree at browser - SQL = render_template("/".join([self.template_path, + SQL = 
render_template("/".join([self.table_template_path, 'get_oid.sql']), scid=scid, data=data) status, tid = self.conn.execute_scalar(SQL) if not status: @@ -1509,13 +1125,14 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): tid, scid, data['name'], - icon="icon-table" + icon="icon-table", + is_partitioned=partitioned ) ) except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def update(self, gid, sid, did, scid, tid): """ This function will update an existing table object @@ -1545,7 +1162,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): if not status: return internal_server_error(errormsg=res) - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'get_schema_oid.sql']), tid=tid) status, res = self.conn.execute_2darray(SQL) if not status: @@ -1554,18 +1171,29 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): # new schema id scid = res['rows'][0]['scid'] + # Check the table is partitioned table or not + SQL = render_template("/".join([self.table_template_path, + 'nodes.sql']), + scid=scid, tid=tid) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + if len(rset['rows']) == 0: + return gone(gettext("Could not find the table.")) + return jsonify( node=self.blueprint.generate_browser_node( tid, scid, name, - icon="icon-%s" % self.node_type + icon="icon-%s" % self.node_type, + is_partitioned=rset['rows'][0]['is_partitioned'] if 'is_partitioned' in rset['rows'][0] else False ) ) except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def delete(self, gid, sid, did, scid, tid): """ This function will deletes the table object @@ -1585,7 +1213,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): cascade = False try: - SQL = 
render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'properties.sql']), did=did, scid=scid, tid=tid, datlastsysoid=self.datlastsysoid) @@ -1606,7 +1234,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): data = res['rows'][0] - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'delete.sql']), data=data, cascade=cascade, conn=self.conn) @@ -1626,7 +1254,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def truncate(self, gid, sid, did, scid, tid): """ This function will truncate the table object @@ -1646,7 +1274,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): is_cascade = json.loads(data['cascade']) try: - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'properties.sql']), did=did, scid=scid, tid=tid, datlastsysoid=self.datlastsysoid) @@ -1655,7 +1283,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return internal_server_error(errormsg=res) data = res['rows'][0] - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'truncate.sql']), data=data, cascade=is_cascade) status, res = self.conn.execute_scalar(SQL) @@ -1674,7 +1302,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def enable_disable_triggers(self, gid, sid, did, scid, tid): """ This function will enable/disable trigger(s) on the table object @@ -1694,7 +1322,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): is_enable = json.loads(data['enable']) try: - SQL = 
render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'properties.sql']), did=did, scid=scid, tid=tid, datlastsysoid=self.datlastsysoid) @@ -1703,7 +1331,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return internal_server_error(errormsg=res) data = res['rows'][0] - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'enable_disable_trigger.sql']), data=data, is_enable_trigger=is_enable) status, res = self.conn.execute_scalar(SQL) @@ -1723,7 +1351,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def reset(self, gid, sid, did, scid, tid): """ This function will reset statistics of table @@ -1735,27 +1363,9 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): scid: Schema ID tid: Table ID """ - try: - SQL = render_template("/".join([self.template_path, - 'reset_stats.sql']), - tid=tid) - status, res = self.conn.execute_scalar(SQL) - if not status: - return internal_server_error(errormsg=res) - - return make_json_response( - success=1, - info=gettext("Table statistics have been reset"), - data={ - 'id': tid, - 'scid': scid - } - ) + return BaseTableView.reset_statistics(self, scid, tid) - except Exception as e: - return internal_server_error(errormsg=str(e)) - - @check_precondition + @BaseTableView.check_precondition def msql(self, gid, sid, did, scid, tid=None): """ This function will create modified sql for table object @@ -2188,7 +1798,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): coming from client """ if tid is not None: - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'properties.sql']), did=did, scid=scid, tid=tid, datlastsysoid=self.datlastsysoid) @@ -2244,7 +1854,7 @@ class 
TableView(PGChildNodeView, DataTypeReader, VacuumSettings): set(old_data['coll_inherits']) - set(data['coll_inherits']) ) - SQL = render_template("/".join([self.template_path, 'update.sql']), + SQL = render_template("/".join([self.table_template_path, 'update.sql']), o_data=old_data, data=data, conn=self.conn) # Removes training new lines SQL = SQL.strip('\n') + '\n\n' @@ -2342,6 +1952,44 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): # Combine all the SQL together SQL += column_sql.strip('\n') + # Check for partitions + if 'partitions' in data: + partitions = data['partitions'] + partitions_sql = '\n' + + # If partition(s) is/are deleted + if 'deleted' in partitions: + for row in partitions['deleted']: + temp_data = dict() + schema_name, table_name = \ + self.get_schema_and_table_name(row['oid']) + + temp_data['parent_schema'] = old_data['schema'] + temp_data['partitioned_table_name'] = old_data['name'] + temp_data['schema'] = schema_name + temp_data['name'] = table_name + + # Sql for detach partition + partitions_sql += render_template("/".join( + [self.partition_template_path, 'detach.sql']), + data=temp_data, conn=self.conn).strip('\n') + '\n\n' + + # If partition(s) is/are added + if 'added' in partitions: + temp_data = dict() + temp_data['schema'] = old_data['schema'] + temp_data['name'] = old_data['name'] + # get the partition type + temp_data['partition_type'] = \ + old_data['partition_scheme'].split()[0].lower() + temp_data['partitions'] = partitions['added'] + + partitions_sql += \ + self.get_partitions_sql(temp_data).strip('\n') + '\n\n' + + # Combine all the SQL together + SQL += partitions_sql.strip('\n') + # Check if index constraints are added/changed/deleted index_constraint_sql = self.get_index_constraint_sql(did, tid, data) # If we have index constraint sql then ad it in main sql @@ -2405,15 +2053,49 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): c['remote_schema'] = rset['rows'][0]['schema'] 
c['remote_table'] = rset['rows'][0]['table'] - # If the request for new object which do not have did - SQL = render_template("/".join([self.template_path, 'create.sql']), + partitions_sql = '' + if 'is_partitioned' in data and data['is_partitioned']: + data['relkind'] = 'p' + # create partition scheme + data['partition_scheme'] = self.get_partition_scheme(data) + partitions_sql = self.get_partitions_sql(data) + + SQL = render_template("/".join([self.table_template_path, + 'create.sql']), data=data, conn=self.conn) + + # Append SQL for partitions + SQL += '\n' + partitions_sql + SQL = re.sub('\n{2,}', '\n\n', SQL) SQL = SQL.strip('\n') return SQL, data['name'] if 'name' in data else old_data['name'] @staticmethod + def get_partition_scheme(data): + partition_scheme = None + if 'partition_type' in data \ + and data['partition_type'] == 'range': + partition_scheme = 'RANGE (' + elif 'partition_type' in data \ + and data['partition_type'] == 'list': + partition_scheme = 'LIST (' + + for row in data['partition_keys']: + if row['key_type'] == 'column': + partition_scheme += row['pt_column'] + ', ' + elif row['key_type'] == 'expression': + partition_scheme += row['expression'] + ', ' + + # Remove extra space and comma + if len(data['partition_keys']) > 0: + partition_scheme = partition_scheme[:-2] + partition_scheme += ')' + + return partition_scheme + + @staticmethod def validate_constrains(key, data): if key == 'primary_key' or key == 'unique_constraint': @@ -2447,7 +2129,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return True - @check_precondition + @BaseTableView.check_precondition def dependents(self, gid, sid, did, scid, tid): """ This function get the dependents and return ajax response @@ -2460,43 +2142,9 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): scid: Schema ID tid: Table ID """ - # Specific condition for column which we need to append - where = "WHERE dep.refobjid={0}::OID".format(tid) - - dependents_result = 
self.get_dependents( - self.conn, tid - ) - - # Specific sql to run againt column to fetch dependents - SQL = render_template("/".join([self.template_path, - 'depend.sql']), where=where) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - - for row in res['rows']: - ref_name = row['refname'] - if ref_name is None: - continue - - dep_type = '' - dep_str = row['deptype'] - if dep_str == 'a': - dep_type = 'auto' - elif dep_str == 'n': - dep_type = 'normal' - elif dep_str == 'i': - dep_type = 'internal' - - dependents_result.append({'type': 'sequence', 'name': ref_name, 'field': dep_type}) - - return ajax_response( - response=dependents_result, - status=200 - ) + return BaseTableView.get_table_dependents(self, tid) - @check_precondition + @BaseTableView.check_precondition def dependencies(self, gid, sid, did, scid, tid): """ This function get the dependencies and return ajax response @@ -2508,18 +2156,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): did: Database ID scid: Schema ID tid: Table ID - """ - dependencies_result = self.get_dependencies( - self.conn, tid - ) - - return ajax_response( - response=dependencies_result, - status=200 - ) + return BaseTableView.get_table_dependencies(self, tid) - @check_precondition + @BaseTableView.check_precondition def sql(self, gid, sid, did, scid, tid): """ This function will creates reverse engineered sql for @@ -2534,12 +2174,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): """ main_sql = [] - """ - ##################################### - # 1) Reverse engineered sql for TABLE - ##################################### - """ - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'properties.sql']), did=did, scid=scid, tid=tid, datlastsysoid=self.datlastsysoid) @@ -2552,250 +2187,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): data = res['rows'][0] 
- # Table & Schema declaration so that we can use them in child nodes - schema = data['schema'] - table = data['name'] - - data = self._formatter(did, scid, tid, data) - - # Now we have all lis of columns which we need - # to include in our create definition, Let's format them - if 'columns' in data: - for c in data['columns']: - if 'attacl' in c: - c['attacl'] = parse_priv_to_db(c['attacl'], self.column_acl) - - # check type for '[]' in it - if 'cltype' in c: - c['cltype'], c['hasSqrBracket'] = self._cltype_formatter(c['cltype']) - - sql_header = u"-- Table: {0}\n\n-- ".format(self.qtIdent(self.conn, - data['schema'], - data['name'])) + return BaseTableView.get_reverse_engineered_sql( + self, did, scid, tid, main_sql, data) - sql_header += render_template("/".join([self.template_path, - 'delete.sql']), - data=data, conn=self.conn) - - sql_header = sql_header.strip('\n') - sql_header += '\n' - - # Add into main sql - main_sql.append(sql_header) - - # Parse privilege data - if 'relacl' in data: - data['relacl'] = parse_priv_to_db(data['relacl'], self.acl) - - # If the request for new object which do not have did - table_sql = render_template("/".join([self.template_path, - 'create.sql']), - data=data, conn=self.conn, is_sql=True) - - # Add into main sql - table_sql = re.sub('\n{2,}', '\n\n', table_sql) - main_sql.append(table_sql.strip('\n')) - - """ - ###################################### - # 2) Reverse engineered sql for INDEX - ###################################### - """ - - SQL = render_template("/".join([self.index_template_path, - 'nodes.sql']), tid=tid) - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - for row in rset['rows']: - - SQL = render_template("/".join([self.index_template_path, - 'properties.sql']), - did=did, tid=tid, idx=row['oid'], - datlastsysoid=self.datlastsysoid) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - - 
data = dict(res['rows'][0]) - # Adding parent into data dict, will be using it while creating sql - data['schema'] = schema - data['table'] = table - # We also need to fecth columns of index - SQL = render_template("/".join([self.index_template_path, - 'column_details.sql']), - idx=row['oid']) - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - # 'attdef' comes with quotes from query so we need to strip them - # 'options' we need true/false to render switch ASC(false)/DESC(true) - columns = [] - cols = [] - for col_row in rset['rows']: - # We need all data as collection for ColumnsModel - cols_data = { - 'colname': col_row['attdef'].strip('"'), - 'collspcname': col_row['collnspname'], - 'op_class': col_row['opcname'], - } - if col_row['options'][0] == 'DESC': - cols_data['sort_order'] = True - columns.append(cols_data) - - # We need same data as string to display in properties window - # If multiple column then separate it by colon - cols_str = col_row['attdef'] - if col_row['collnspname']: - cols_str += ' COLLATE ' + col_row['collnspname'] - if col_row['opcname']: - cols_str += ' ' + col_row['opcname'] - if col_row['options'][0] == 'DESC': - cols_str += ' DESC' - cols.append(cols_str) - - # Push as collection - data['columns'] = columns - # Push as string - data['cols'] = ', '.join(cols) - - sql_header = u"\n-- Index: {0}\n\n-- ".format(data['name']) - - sql_header += render_template("/".join([self.index_template_path, - 'delete.sql']), - data=data, conn=self.conn) - - index_sql = render_template("/".join([self.index_template_path, - 'create.sql']), - data=data, conn=self.conn) - index_sql += "\n" - index_sql += render_template("/".join([self.index_template_path, - 'alter.sql']), - data=data, conn=self.conn) - - # Add into main sql - index_sql = re.sub('\n{2,}', '\n\n', index_sql) - main_sql.append(sql_header + '\n\n' + index_sql.strip('\n')) - - """ - ######################################## - # 3) 
Reverse engineered sql for TRIGGERS - ######################################## - """ - SQL = render_template("/".join([self.trigger_template_path, - 'nodes.sql']), tid=tid) - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - for row in rset['rows']: - trigger_sql = '' - - SQL = render_template("/".join([self.trigger_template_path, - 'properties.sql']), - tid=tid, trid=row['oid'], - datlastsysoid=self.datlastsysoid) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - - data = dict(res['rows'][0]) - # Adding parent into data dict, will be using it while creating sql - data['schema'] = schema - data['table'] = table - - data = self.get_trigger_function_schema(data) - - if len(data['custom_tgargs']) > 1: - # We know that trigger has more than 1 argument, let's join them - data['tgargs'] = self._format_args(data['custom_tgargs']) - - if len(data['tgattr']) > 1: - columns = ', '.join(data['tgattr'].split(' ')) - - SQL = render_template("/".join([self.trigger_template_path, - 'get_columns.sql']), - tid=tid, clist=columns) - - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - # 'tgattr' contains list of columns from table used in trigger - columns = [] - - for col_row in rset['rows']: - columns.append({'column': col_row['name']}) - - data['columns'] = columns - - data = trigger_definition(data) - - sql_header = u"\n-- Trigger: {0}\n\n-- ".format(data['name']) - - sql_header += render_template("/".join([self.trigger_template_path, - 'delete.sql']), - data=data, conn=self.conn) - - # If the request for new object which do not have did - trigger_sql = render_template("/".join([self.trigger_template_path, - 'create.sql']), - data=data, conn=self.conn) - - trigger_sql = sql_header + '\n\n' + trigger_sql.strip('\n') - - # If trigger is disabled then add sql code for the same - if not 
data['is_enable_trigger']: - trigger_sql += '\n\n' - trigger_sql += render_template("/".join([ - self.trigger_template_path, - 'enable_disable_trigger.sql']), - data=data, conn=self.conn) - - # Add into main sql - trigger_sql = re.sub('\n{2,}', '\n\n', trigger_sql) - main_sql.append(trigger_sql) - - """ - ##################################### - # 4) Reverse engineered sql for RULES - ##################################### - """ - - SQL = render_template("/".join( - [self.rules_template_path, 'properties.sql']), tid=tid) - - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - for row in rset['rows']: - rules_sql = '\n' - SQL = render_template("/".join( - [self.rules_template_path, 'properties.sql'] - ), rid=row['oid'], datlastsysoid=self.datlastsysoid) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - - res_data = parse_rule_definition(res) - rules_sql += render_template("/".join( - [self.rules_template_path, 'create.sql']), - data=res_data, display_comments=True) - - # Add into main sql - rules_sql = re.sub('\n{2,}', '\n\n', rules_sql) - main_sql.append(rules_sql) - - sql = '\n'.join(main_sql) - - return ajax_response(response=sql.strip('\n')) - - @check_precondition + @BaseTableView.check_precondition def select_sql(self, gid, sid, did, scid, tid): """ SELECT script sql for the object @@ -2810,7 +2205,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns: SELECT Script sql for the object """ - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'properties.sql']), did=did, scid=scid, tid=tid, datlastsysoid=self.datlastsysoid) @@ -2839,7 +2234,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): ) return ajax_response(response=sql) - @check_precondition + @BaseTableView.check_precondition def insert_sql(self, gid, sid, did, scid, tid): """ INSERT 
script sql for the object @@ -2854,7 +2249,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns: INSERT Script sql for the object """ - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'properties.sql']), did=did, scid=scid, tid=tid, datlastsysoid=self.datlastsysoid) @@ -2886,7 +2281,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return ajax_response(response=sql) - @check_precondition + @BaseTableView.check_precondition def update_sql(self, gid, sid, did, scid, tid): """ UPDATE script sql for the object @@ -2901,7 +2296,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns: UPDATE Script sql for the object """ - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'properties.sql']), did=did, scid=scid, tid=tid, datlastsysoid=self.datlastsysoid) @@ -2935,7 +2330,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return ajax_response(response=sql) - @check_precondition + @BaseTableView.check_precondition def delete_sql(self, gid, sid, did, scid, tid): """ DELETE script sql for the object @@ -2950,7 +2345,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns: DELETE Script sql for the object """ - SQL = render_template("/".join([self.template_path, + SQL = render_template("/".join([self.table_template_path, 'properties.sql']), did=did, scid=scid, tid=tid, datlastsysoid=self.datlastsysoid) @@ -2966,7 +2361,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return ajax_response(response=sql) - @check_precondition + @BaseTableView.check_precondition def statistics(self, gid, sid, did, scid, tid=None): """ Statistics @@ -2982,63 +2377,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): otherwise it will return statistics for all the tables in that schema. 
""" - - # Fetch schema name - status, schema_name = self.conn.execute_scalar( - render_template( - "/".join([self.template_path, 'get_schema.sql']), - conn=self.conn, scid=scid - ) - ) - if not status: - return internal_server_error(errormsg=schema_name) - - if tid is None: - status, res = self.conn.execute_dict( - render_template( - "/".join([self.template_path, 'coll_table_stats.sql']), - conn=self.conn, schema_name=schema_name - ) - ) - else: - # For Individual table stats - - # Check if pgstattuple extension is already created? - # if created then only add extended stats - status, is_pgstattuple = self.conn.execute_scalar(""" - SELECT (count(extname) > 0) AS is_pgstattuple - FROM pg_extension - WHERE extname='pgstattuple' - """) - if not status: - return internal_server_error(errormsg=is_pgstattuple) - - # Fetch Table name - status, table_name = self.conn.execute_scalar( - render_template( - "/".join([self.template_path, 'get_table.sql']), - conn=self.conn, scid=scid, tid=tid - ) - ) - if not status: - return internal_server_error(errormsg=table_name) - - status, res = self.conn.execute_dict( - render_template( - "/".join([self.template_path, 'stats.sql']), - conn=self.conn, schema_name=schema_name, - table_name=table_name, - is_pgstattuple=is_pgstattuple, tid=tid - ) - ) - - if not status: - return internal_server_error(errormsg=res) - - return make_json_response( - data=res, - status=200 - ) - + return BaseTableView.get_table_statistics(self, scid, tid) TableView.register_node_view(blueprint) diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js index 179b961..7252949 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js @@ 
-228,6 +228,17 @@ define('pgadmin.node.column', [ return false; } + // If table is partitioned table then disable + if (m.top && !_.isUndefined(m.top.get('is_partitioned')) && + m.top.get('is_partitioned')) + { + setTimeout(function () { + m.set('is_primary_key', false); + }, 10); + + return false; + } + if(!m.inSchemaWithColumnCheck.apply(this, [m]) && !_.isUndefined(name) && !_.isNull(name) && name !== '') { return true; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js index 82bb829..7ec1af9 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js @@ -133,6 +133,18 @@ define('pgadmin.node.check_constraints', [ 'switch', cell: 'boolean', group: gettext('Definition'), mode: ['properties', 'create', 'edit'], min_version: 90200, disabled: function(m) { + // Disabled if table is a partitioned table. 
+ if ((_.has(m , 'top') && !_.isUndefined(m.top) && m.top.get('is_partitioned')) || + (_.has(m, 'node_info') && _.has(m.node_info, 'table') && + _.has(m.node_info.table, 'is_partitioned') && m.node_info.table.is_partitioned) + ){ + setTimeout(function(){ + m.set('connoinherit', false); + },10); + + return true; + } + return ((_.has(m, 'handler') && !_.isUndefined(m.handler) && !_.isUndefined(m.get('oid'))) || (_.isFunction(m.isNew) && !m.isNew())); diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js index 814edce..c21eac5 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js @@ -919,6 +919,10 @@ define('pgadmin.node.exclusion_constraint', [ var t = pgBrowser.tree, i = item, d = itemData, parents = []; // To iterate over tree to check parent node while (i) { + // If table is partitioned table then return false + if ('is_partitioned' in d && d.is_partitioned) + return false; + // If it is schema then allow user to create table if (_.indexOf(['schema'], d._type) > -1) return true; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js index 84b7adf..dbbd499 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js 
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js @@ -1067,6 +1067,10 @@ define('pgadmin.node.foreign_key', [ var t = pgBrowser.tree, i = item, d = itemData, parents = []; // To iterate over tree to check parent node while (i) { + // If table is partitioned table then return false + if ('is_partitioned' in d && d.is_partitioned) + return false; + // If it is schema then allow user to c reate table if (_.indexOf(['schema'], d._type) > -1) return true; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js index 97a404a..772b73d 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js @@ -48,6 +48,10 @@ define('pgadmin.node.{{node_type}}', [ var t = pgBrowser.tree, i = item, d = itemData, parents = []; // To iterate over tree to check parent node while (i) { + // If table is partitioned table then return false + if ('is_partitioned' in d && d.is_partitioned) + return false; + // If it is schema then allow user to c reate table if (_.indexOf(['schema'], d._type) > -1) { {% if node_type == 'primary_key' %} diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py index e52c15b..85da766 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py +++ 
b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py @@ -72,6 +72,18 @@ class IndexesModule(CollectionNodeModule): if super(IndexesModule, self).BackendSupported(manager, **kwargs): conn = manager.connection(did=kwargs['did']) + # In case of partitioned table return false. + if 'tid' in kwargs and manager.version >= 100000: + partition_path = 'partition/sql/#{0}#'.format(manager.version) + SQL = render_template("/".join( + [partition_path, 'backend_support.sql']), tid=kwargs['tid']) + status, res = conn.execute_scalar(SQL) + + # check if any errors + if not status: + return internal_server_error(errormsg=res) + return not res + if 'vid' not in kwargs: return True diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/__init__.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/__init__.py new file mode 100644 index 0000000..63156de --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/__init__.py @@ -0,0 +1,411 @@ +########################################################################## +# +# pgAdmin 4 - PostgreSQL Tools +# +# Copyright (C) 2013 - 2017, The pgAdmin Development Team +# This software is released under the PostgreSQL Licence +# +########################################################################## + +""" Implements Partitions Node """ + +import simplejson as json +import pgadmin.browser.server_groups.servers.databases as database +from flask import render_template, request, jsonify +from flask_babel import gettext +from pgadmin.browser.server_groups.servers.databases.schemas.utils \ + import DataTypeReader, VacuumSettings +from pgadmin.utils.ajax import make_json_response, internal_server_error, \ + make_response as ajax_response, gone +from pgadmin.browser.server_groups.servers.databases.schemas.tables.utils \ + import BaseTableView +from pgadmin.browser.collection import CollectionNodeModule + + 
+class PartitionsModule(CollectionNodeModule): + """ + class PartitionsModule(CollectionNodeModule) + + A module class for Partition node derived from CollectionNodeModule. + + Methods: + ------- + * __init__(*args, **kwargs) + - Method is used to initialize the Partition and it's base module. + + * get_nodes(gid, sid, did, scid, tid) + - Method is used to generate the browser collection node. + + * node_inode() + - Method is overridden from its base class to make the node as leaf node. + + * script_load() + - Load the module script for schema, when any of the server node is + initialized. + """ + + NODE_TYPE = 'partition' + COLLECTION_LABEL = gettext("Partitions") + + def __init__(self, *args, **kwargs): + """ + Method is used to initialize the PartitionsModule and it's base module. + + Args: + *args: + **kwargs: + """ + super(PartitionsModule, self).__init__(*args, **kwargs) + self.min_ver = 100000 + self.max_ver = None + + def get_nodes(self, gid, sid, did, scid, **kwargs): + """ + Generate the collection node + """ + assert ('tid' in kwargs or 'vid' in kwargs) + yield self.generate_browser_collection_node( + kwargs['tid'] if 'tid' in kwargs else kwargs['vid'] + ) + + @property + def script_load(self): + """ + Load the module script for server, when any of the server-group node is + initialized. 
+ """ + return database.DatabaseModule.NODE_TYPE + + @property + def node_inode(self): + """ + Load the module node as a leaf node + """ + return False + + def BackendSupported(self, manager, **kwargs): + """ + Load this module if it is a partition table + """ + if CollectionNodeModule.BackendSupported(self, manager, **kwargs): + conn = manager.connection(did=kwargs['did']) + + template_path = 'partition/sql/#{0}#'.format(manager.version) + SQL = render_template("/".join( + [template_path, 'backend_support.sql']), tid=kwargs['tid']) + status, res = conn.execute_scalar(SQL) + + # check if any errors + if not status: + return internal_server_error(errormsg=res) + + return res + + +blueprint = PartitionsModule(__name__) + + +class PartitionsView(BaseTableView, DataTypeReader, VacuumSettings): + """ + This class is responsible for generating routes for Partition node + + Methods: + ------- + + * list() + - This function is used to list all the Partition nodes within that + collection. + + * nodes() + - This function will used to create all the child node within that + collection, Here it will create all the Partition node. 
+ + * properties(gid, sid, did, scid, tid, ptid) + - This function will show the properties of the selected Partition node + + """ + + node_type = blueprint.node_type + + parent_ids = [ + {'type': 'int', 'id': 'gid'}, + {'type': 'int', 'id': 'sid'}, + {'type': 'int', 'id': 'did'}, + {'type': 'int', 'id': 'scid'}, + {'type': 'int', 'id': 'tid'} + ] + ids = [ + {'type': 'int', 'id': 'ptid'} + ] + + operations = dict({ + 'obj': [ + {'get': 'properties', 'delete': 'delete', 'put': 'update'}, + {'get': 'list', 'post': 'create'} + ], + 'nodes': [{'get': 'nodes'}, {'get': 'nodes'}], + 'sql': [{'get': 'sql'}], + 'msql': [{'get': 'msql'}, {}], + 'module.js': [{}, {}, {'get': 'module_js'}], + 'detach': [{'put': 'detach'}] + }) + + @BaseTableView.check_precondition + def list(self, gid, sid, did, scid, tid): + """ + This function is used to list all the table nodes within that + collection. + + Args: + gid: Server group ID + sid: Server ID + did: Database ID + scid: Schema ID + tid: Table ID + + Returns: + JSON of available table nodes + """ + SQL = render_template("/".join([self.partition_template_path, + 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid) + status, res = self.conn.execute_dict(SQL) + + if not status: + return internal_server_error(errormsg=res) + return ajax_response( + response=res['rows'], + status=200 + ) + + @BaseTableView.check_precondition + def nodes(self, gid, sid, did, scid, tid, ptid=None): + """ + This function is used to list all the table nodes within that + collection. 
+ + Args: + gid: Server group ID + sid: Server ID + did: Database ID + scid: Schema ID + tid: Parent Table ID + ptid: Partition Table ID + + Returns: + JSON of available table nodes + """ + SQL = render_template( + "/".join([self.partition_template_path, 'nodes.sql']), + scid=scid, tid=tid + ) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + def browser_node(row): + return self.blueprint.generate_browser_node( + row['oid'], + tid, + row['name'], + icon="icon-partition", + tigger_count=row['triggercount'], + has_enable_triggers=row['has_enable_triggers'], + is_partitioned=row['is_partitioned'], + parent_schema_id=scid, + schema_id=row['schema_id'], + schema_name=row['schema_name'] + ) + + if ptid is not None: + if len(rset['rows']) == 0: + return gone(gettext( + "The specified partitioned table could not be found." + )) + + return make_json_response( + data=browser_node(rset['rows'][0]), status=200 + ) + + res = [] + for row in rset['rows']: + res.append(browser_node(row)) + + return make_json_response( + data=res, + status=200 + ) + + @BaseTableView.check_precondition + def properties(self, gid, sid, did, scid, tid, ptid): + """ + This function will show the properties of the selected table node. 
+
+        Args:
+            gid: Server Group ID
+            sid: Server ID
+            did: Database ID
+            scid: Schema ID
+            tid: Table ID
+            ptid: Partition Table ID
+
+        Returns:
+            JSON of selected table node
+        """
+
+        SQL = render_template("/".join([self.partition_template_path,
+                                        'properties.sql']),
+                              did=did, scid=scid, tid=tid,
+                              ptid=ptid, datlastsysoid=self.datlastsysoid)
+        status, res = self.conn.execute_dict(SQL)
+        if not status:
+            return internal_server_error(errormsg=res)
+
+        if len(res['rows']) == 0:
+            return gone(gettext(
+                "The specified partitioned table could not be found."))
+
+        data = res['rows'][0]
+
+        data['vacuum_settings_str'] = ""
+
+        if data['table_vacuum_settings_str'] is not None:
+            data['vacuum_settings_str'] += data[
+                'table_vacuum_settings_str'].replace(',', '\n')
+
+        if data['toast_table_vacuum_settings_str'] is not None:
+            data['vacuum_settings_str'] += '\n' + '\n'.join(
+                ['toast_' + setting for setting in data[
+                    'toast_table_vacuum_settings_str'
+                ].split(',')]
+            )
+        data['vacuum_settings_str'] = data[
+            'vacuum_settings_str'
+        ].replace("=", " = ")
+
+        return ajax_response(
+            response=data,
+            status=200
+        )
+
+    @BaseTableView.check_precondition
+    def sql(self, gid, sid, did, scid, tid, ptid):
+        """
+        This function will create the reverse engineered SQL for
+        the partition object
+
+        Args:
+            gid: Server Group ID
+            sid: Server ID
+            did: Database ID
+            scid: Schema ID
+            tid: Table ID
+            ptid: Partition Table ID
+        """
+        main_sql = []
+
+        SQL = render_template("/".join([self.partition_template_path,
+                                        'properties.sql']),
+                              did=did, scid=scid, tid=tid,
+                              ptid=ptid, datlastsysoid=self.datlastsysoid)
+        status, res = self.conn.execute_dict(SQL)
+        if not status:
+            return internal_server_error(errormsg=res)
+
+        if len(res['rows']) == 0:
+            return gone(gettext(
+                "The specified partitioned table could not be found."))
+
+        data = res['rows'][0]
+
+        return BaseTableView.get_reverse_engineered_sql(self, did, scid, ptid,
+                                                        main_sql, data)
+
+    
@BaseTableView.check_precondition
+    def detach(self, gid, sid, did, scid, tid, ptid):
+        """
+        This function will detach the partition from its parent table
+
+        Args:
+            gid: Server Group ID
+            sid: Server ID
+            did: Database ID
+            scid: Schema ID
+            tid: Table ID
+            ptid: Partition Table ID
+        """
+        # Fetch schema name
+        status, parent_schema = self.conn.execute_scalar(
+            render_template(
+                "/".join([self.table_template_path, 'get_schema.sql']),
+                conn=self.conn, scid=scid
+            )
+        )
+        if not status:
+            return internal_server_error(errormsg=parent_schema)
+
+        # Fetch Parent Table name
+        status, partitioned_table_name = self.conn.execute_scalar(
+            render_template(
+                "/".join([self.table_template_path, 'get_table.sql']),
+                conn=self.conn, scid=scid, tid=tid
+            )
+        )
+        if not status:
+            return internal_server_error(errormsg=partitioned_table_name)
+
+        # Get schema oid of partition
+        status, pscid = self.conn.execute_scalar(
+            render_template("/".join([self.table_template_path,
+                                      'get_schema_oid.sql']), tid=ptid))
+        if not status:
+            return internal_server_error(errormsg=pscid)
+
+        # Fetch schema name
+        status, partition_schema = self.conn.execute_scalar(
+            render_template("/".join([self.table_template_path,
+                                      'get_schema.sql']), conn=self.conn,
+                            scid=pscid)
+        )
+        if not status:
+            return internal_server_error(errormsg=partition_schema)
+
+        # Fetch Partition Table name
+        status, partition_name = self.conn.execute_scalar(
+            render_template(
+                "/".join([self.table_template_path, 'get_table.sql']),
+                conn=self.conn, scid=pscid, tid=ptid
+            )
+        )
+        if not status:
+            return internal_server_error(errormsg=partition_name)
+
+        try:
+            temp_data = dict()
+            temp_data['parent_schema'] = parent_schema
+            temp_data['partitioned_table_name'] = partitioned_table_name
+            temp_data['schema'] = partition_schema
+            temp_data['name'] = partition_name
+
+            SQL = render_template("/".join(
+                [self.partition_template_path, 'detach.sql']),
+                data=temp_data, conn=self.conn)
+
+            status, res = self.conn.execute_scalar(SQL)
+            if not 
status: + return internal_server_error(errormsg=res) + + return make_json_response( + success=1, + info=gettext("Partition detached."), + data={ + 'id': ptid, + 'scid': scid + } + ) + except Exception as e: + return internal_server_error(errormsg=str(e)) + +PartitionsView.register_node_view(blueprint) diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/coll-partition.png b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/coll-partition.png new file mode 100644 index 0000000000000000000000000000000000000000..680e86457e8c48cd01329f0303d663b0304ab44d GIT binary patch literal 555 zcmeAS@N?(olHy`uVBq!ia0vp^0wB!63?wyl`GbMf-Te%o613!~@ehb_5 z*>m*=+of*}=e<^+`SRYqd(Ef+dI3e2yw#Zb^8Wq%j~+dG{P^+Hr%&4tJb&=u!SdzH z*Q{Cd=FOXT@7}$9`SRhzhs%~NTd`ur+O=!fuV4S_)vF1oK0kl{{PpYCFJ8P@xpL+E z_wPS`{P_0m+f}PpZP>73)22;No;>;R;X^gZ0Y5ZmzF7BH^&ij!j3q&S!3+-1ZlnP@ zoCO|{#X#BvjNMLV+W{F%JzX3_BreCEKQ7i}Akg}-$5cydO$Teo7MG=+N#Fmg3_MdIbtj55z467wYr(8Tf`DM;NgJ}*2Ts9xt za8DvyRL7)e=Crp3yMhn8I2OC7#SEE=o%X78W@Hc8CaPZTbY<@8<<)d7|eU8w+cl=ZhlH;S|vn-fhAZ2 cP_>Dbff+=@sp+9>fEpM)UHx3vIVCg!0JlsKi2wiq literal 0 HcmV?d00001 diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/partition.png b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/partition.png new file mode 100644 index 0000000000000000000000000000000000000000..37b2227d8fe0cf76bde23855676403c57ccfb569 GIT binary patch literal 593 zcmeAS@N?(olHy`uVBq!ia0vp^0wB!63?wyl`GbMfjR2nzS0Jr8^?C1w|C7%DUUd2E zt{ZPJ-hX-j@$;w8oT%hof0v-kaQT=vdv;Tyd3Q5-o1ObdiClJ8#Zj*xN*~_O&>pg{QUXz*RNl;OkH}&m?E%JaC$sHdY(w2yI*ETJB#CxDVyM_6hsq?*{ z%bR2yj^^b{bb0Zwd(Bu=J=bCx>jtGG3nR*NpRqG!KlWJZ=J(tnh2i}TIU#U}a)#Wn!vrU}|MxFz=n-DijU5`6-!cl@JXEmS7D))h1R3W)Kah TriZQpYGCkm^>bP0l+XkKw!A9P literal 0 HcmV?d00001 diff --git 
a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/templates/partition/js/partition.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/templates/partition/js/partition.js new file mode 100644 index 0000000..3368f75 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/templates/partition/js/partition.js @@ -0,0 +1,759 @@ +define( + ['sources/gettext', 'jquery', 'underscore', 'underscore.string', 'pgadmin', 'pgadmin.browser', + 'backform', 'alertify', 'pgadmin.browser.collection'], +function(gettext, $, _, S, pgAdmin, pgBrowser, Backform, alertify) { + + if (!pgBrowser.Nodes['coll-partition']) { + var databases = pgAdmin.Browser.Nodes['coll-partition'] = + pgAdmin.Browser.Collection.extend({ + node: 'partition', + label: gettext('Partitions'), + type: 'coll-partition', + columns: ['name', 'schema', 'partition_value', 'description'], + hasStatistics: true + }); + }; + + if (!pgBrowser.Nodes['partition']) { + pgAdmin.Browser.Nodes['partition'] = pgAdmin.Browser.Node.extend({ + parent_type: 'table', + collection_type: 'coll-partition', + type: 'partition', + label: gettext('Partition'), + hasSQL: true, + hasDepends: true, + hasStatistics: true, + statsPrettifyFields: ['Size', 'Indexes size', 'Table size', + 'Toast table size', 'Tuple length', + 'Dead tuple length', 'Free space'], + sqlAlterHelp: 'sql-altertable.html', + sqlCreateHelp: 'sql-createtable.html', + dialogHelp: '{{ url_for('help.static', filename='table_dialog.html') }}', + hasScriptTypes: ['create'], + height: '95%', + width: '85%', + Init: function() { + /* Avoid mulitple registration of menus */ + if (this.initialized) + return; + + this.initialized = true; + + pgBrowser.add_menus([{ + name: 'truncate_table', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'truncate_table', + category: 'Truncate', priority: 3, label: gettext('Truncate'), + icon: 'fa fa-eraser', enable : 
'canCreate' + },{ + name: 'truncate_table_cascade', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'truncate_table_cascade', + category: 'Truncate', priority: 3, label: gettext('Truncate Cascade'), + icon: 'fa fa-eraser', enable : 'canCreate' + },{ + name: 'reset_table_stats', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'reset_table_stats', + category: 'Reset', priority: 4, label: gettext('Reset Statistics'), + icon: 'fa fa-bar-chart', enable : 'canCreate' + },{ + name: 'detach_partition', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'detach_partition', + priority: 2, label: gettext('Detach Partition'), + icon: 'fa fa-remove' + } + ]); + }, + getTreeNodeHierarchy: function(i) { + var idx = 0, + res = {}, + t = pgBrowser.tree; + + do { + d = t.itemData(i); + if ( + d._type in pgBrowser.Nodes && pgBrowser.Nodes[d._type].hasId + ) { + if (d._type == 'partition' && 'partition' in res) { + if (!('table' in res)) { + res['table'] = _.extend({}, d, {'priority': idx}); + idx -= 1; + } + } else if (d._type == 'table') { + if (!('table' in res)) { + res['table'] = _.extend({}, d, {'priority': idx}); + idx -= 1; + } + } else { + res[d._type] = _.extend({}, d, {'priority': idx}); + idx -= 1; + } + } + i = t.hasParent(i) ? t.parent(i) : null; + } while (i); + + return res; + }, + generate_url: function(item, type, d, with_id, info) { + if (_.indexOf([ + 'stats', 'statistics', 'children', 'dependency', 'dependent', + 'truncate', 'reset' + ], type) == -1) { + return pgBrowser.Node.generate_url.apply(this, arguments); + } + + if (type == 'statistics') { + type = 'stats'; + } + + info = (_.isUndefined(item) || _.isNull(item)) ? 
+ info || {} : this.getTreeNodeHierarchy(item); + + return S('table/%s/%s/%s/%s/%s/%s').sprintf( + encodeURIComponent(type), encodeURIComponent(info['server-group']._id), + encodeURIComponent(info['server']._id), + encodeURIComponent(info['database']._id), + encodeURIComponent(info['partition'].schema_id), + encodeURIComponent(info['partition']._id) + ).value(); + }, + canDrop: pgBrowser.Nodes['schema'].canChildDrop, + canDropCascade: pgBrowser.Nodes['schema'].canChildDrop, + callbacks: { + /* Enable trigger(s) on table */ + enable_triggers_on_table: function(args) { + var params = {'enable': true }; + this.callbacks.set_triggers.apply(this, [args, params]); + }, + /* Disable trigger(s) on table */ + disable_triggers_on_table: function(args) { + var params = {'enable': false }; + this.callbacks.set_triggers.apply(this, [args, params]); + }, + set_triggers: function(args, params) { + // This function will send request to enable or + // disable triggers on table level + var input = args || {}; + obj = this, + t = pgBrowser.tree, + i = input.item || t.selected(), + d = i && i.length == 1 ? 
t.itemData(i) : undefined; + if (!d) + return false; + + $.ajax({ + url: obj.generate_url(i, 'set_trigger' , d, true), + type:'PUT', + data: params, + dataType: "json", + success: function(res) { + if (res.success == 1) { + alertify.success("{{ _('" + res.info + "') }}"); + t.unload(i); + t.setInode(i); + t.deselect(i); + setTimeout(function() { + t.select(i); + }, 10); + } + }, + error: function(xhr, status, error) { + try { + var err = $.parseJSON(xhr.responseText); + if (err.success == 0) { + alertify.error(err.errormsg); + } + } catch (e) {} + t.unload(i); + } + }); + }, + /* Truncate table */ + truncate_table: function(args) { + var params = {'cascade': false }; + this.callbacks.truncate.apply(this, [args, params]); + }, + /* Truncate table with cascade */ + truncate_table_cascade: function(args) { + var params = {'cascade': true }; + this.callbacks.truncate.apply(this, [args, params]); + }, + truncate: function(args, params) { + var input = args || {}; + obj = this, + t = pgBrowser.tree, + i = input.item || t.selected(), + d = i && i.length == 1 ? 
t.itemData(i) : undefined; + + if (!d) + return false; + + alertify.confirm( + gettext('Truncate Table'), + S(gettext('Are you sure you want to truncate table %s?')).sprintf(d.label).value(), + function (e) { + if (e) { + var data = d; + $.ajax({ + url: obj.generate_url(i, 'truncate' , d, true), + type:'PUT', + data: params, + dataType: "json", + success: function(res) { + if (res.success == 1) { + alertify.success("{{ _('" + res.info + "') }}"); + t.removeIcon(i); + data.icon = 'icon-table'; + t.addIcon(i, {icon: data.icon}); + t.unload(i); + t.setInode(i); + t.deselect(i); + // Fetch updated data from server + setTimeout(function() { + t.select(i); + }, 10); + } + }, + error: function(xhr, status, error) { + try { + var err = $.parseJSON(xhr.responseText); + if (err.success == 0) { + alertify.error(err.errormsg); + } + } catch (e) {} + t.unload(i); + } + }); + }}, + function() {} + ); + }, + reset_table_stats: function(args) { + var input = args || {}, + obj = this, + t = pgBrowser.tree, + i = input.item || t.selected(), + d = i && i.length == 1 ? 
t.itemData(i) : undefined; + + if (!d) + return false; + + alertify.confirm( + gettext('Reset statistics'), + S(gettext('Are you sure you want to reset the statistics for table %s?')).sprintf(d._label).value(), + function (e) { + if (e) { + var data = d; + $.ajax({ + url: obj.generate_url(i, 'reset' , d, true), + type:'DELETE', + success: function(res) { + if (res.success == 1) { + alertify.success("{{ _('" + res.info + "') }}"); + t.removeIcon(i); + data.icon = 'icon-table'; + t.addIcon(i, {icon: data.icon}); + t.unload(i); + t.setInode(i); + t.deselect(i); + // Fetch updated data from server + setTimeout(function() { + t.select(i); + }, 10); + } + }, + error: function(xhr, status, error) { + try { + var err = $.parseJSON(xhr.responseText); + if (err.success == 0) { + alertify.error(err.errormsg); + } + } catch (e) {} + t.unload(i); + } + }); + } + }, + function() {} + ); + }, + detach_partition: function(args) { + var input = args || {}, + obj = this, + t = pgBrowser.tree, + i = input.item || t.selected(), + d = i && i.length == 1 ? 
t.itemData(i) : undefined; + + if (!d) + return false; + + alertify.confirm( + gettext('Detach Partition'), + S(gettext('Are you sure you want to detach the partition %s?')).sprintf(d._label).value(), + function (e) { + if (e) { + var data = d; + $.ajax({ + url: obj.generate_url(i, 'detach' , d, true), + type:'PUT', + success: function(res) { + if (res.success == 1) { + alertify.success("{{ _('" + res.info + "') }}"); + var n = t.next(i); + if (!n || !n.length) { + n = t.prev(i); + if (!n || !n.length) { + n = t.parent(i); + t.setInode(n, true); + } + } + t.remove(i); + if (n.length) { + t.select(n); + } + } + }, + error: function(xhr, status, error) { + try { + var err = $.parseJSON(xhr.responseText); + if (err.success == 0) { + alertify.error(err.errormsg); + } + } catch (e) {} + } + }); + } + }, + function() {} + ); + } + }, + model: pgBrowser.Node.Model.extend({ + defaults: { + name: undefined, + oid: undefined, + spcoid: undefined, + spcname: undefined, + relowner: undefined, + relacl: undefined, + relhasoids: undefined, + relhassubclass: undefined, + reltuples: undefined, + description: undefined, + conname: undefined, + conkey: undefined, + isrepl: undefined, + triggercount: undefined, + relpersistence: undefined, + fillfactor: undefined, + reloftype: undefined, + typname: undefined, + labels: undefined, + providers: undefined, + is_sys_table: undefined, + coll_inherits: [], + hastoasttable: true, + toast_autovacuum_enabled: false, + autovacuum_enabled: false, + primary_key: [] + }, + // Default values! 
+ initialize: function(attrs, args) { + var self = this; + + if (_.size(attrs) === 0) { + var userInfo = pgBrowser.serverInfo[args.node_info.server._id].user, + schemaInfo = args.node_info.schema; + + this.set({ + 'relowner': userInfo.name, 'schema': schemaInfo._label + }, {silent: true}); + } + pgBrowser.Node.Model.prototype.initialize.apply(this, arguments); + + }, + schema: [{ + id: 'name', label: gettext('Name'), type: 'text', + mode: ['properties', 'create', 'edit'], disabled: 'inSchema' + },{ + id: 'oid', label:gettext('OID'), type: 'text', mode: ['properties'] + },{ + id: 'relowner', label:gettext('Owner'), type: 'text', node: 'role', + mode: ['properties', 'create', 'edit'], select2: {allowClear: false}, + disabled: 'inSchema', control: 'node-list-by-name' + },{ + id: 'schema', label:'{{_('Schema')}}', type: 'text', node: 'schema', + control: 'node-list-by-name', mode: ['create', 'edit', 'properties'], + disabled: 'inSchema', filter: function(d) { + // If schema name start with pg_* then we need to exclude them + if(d && d.label.match(/^pg_/)) + { + return false; + } + return true; + }, cache_node: 'database', cache_level: 'database' + },{ + id: 'spcname', label:gettext('Tablespace'), node: 'tablespace', + type: 'text', control: 'node-list-by-name', disabled: 'inSchema', + mode: ['properties', 'create', 'edit'], + filter: function(d) { + // If tablespace name is not "pg_global" then we need to exclude them + return (!(d && d.label.match(/pg_global/))) + } + },{ + id: 'description', label:gettext('Comment'), type: 'multiline', + mode: ['properties', 'create', 'edit'], disabled: 'inSchema' + }, { + id: 'partition_value', label:gettext('Partition Scheme'), + type: 'text', visible: false + },{ + id: 'coll_inherits', label: gettext('Inherited from table(s)'), + type: 'text', group: gettext('Advanced'), mode: ['properties'] + },{ + id: 'inherited_tables_cnt', label:gettext('Inherited tables count'), + type: 'text', mode: ['properties'], group: 
gettext('Advanced'), + disabled: 'inSchema' + },/*{ + // Here we will create tab control for constraints + type: 'nested', control: 'tab', group: gettext('Constraints'), + mode: ['edit', 'create'], + schema: [{ + id: 'primary_key', label: gettext('Primary key'), + model: pgBrowser.Nodes['primary_key'].model, + subnode: pgBrowser.Nodes['primary_key'].model, + editable: false, type: 'collection', + group: gettext('Primary Key'), mode: ['edit', 'create'], + canEdit: true, canDelete: true, + control: 'unique-col-collection', + columns : ['name', 'columns'], + canAdd: true, + canAddRow: function(m) { + // User can only add one primary key + var columns = m.get('columns'); + + return (m.get('primary_key') && + m.get('primary_key').length < 1 && + _.some(columns.pluck('name'))); + } + },{ + id: 'foreign_key', label: gettext('Foreign key'), + model: pgBrowser.Nodes['foreign_key'].model, + subnode: pgBrowser.Nodes['foreign_key'].model, + editable: false, type: 'collection', + group: gettext('Foreign Key'), mode: ['edit', 'create'], + canEdit: true, canDelete: true, + control: 'unique-col-collection', + canAdd: true, + columns : ['name', 'columns'], + canAddRow: function(m) { + // User can only add if there is at least one column with name. 
+ var columns = m.get('columns'); + return _.some(columns.pluck('name')); + } + },{ + id: 'check_constraint', label: gettext('Check constraint'), + model: pgBrowser.Nodes['check_constraints'].model, + subnode: pgBrowser.Nodes['check_constraints'].model, + editable: false, type: 'collection', + group: gettext('Check'), mode: ['edit', 'create'], + canEdit: true, canDelete: true, + control: 'unique-col-collection', + canAdd: true, + columns : ['name', 'consrc'] + },{ + id: 'unique_constraint', label: gettext('Unique Constraint'), + model: pgBrowser.Nodes['unique_constraint'].model, + subnode: pgBrowser.Nodes['unique_constraint'].model, + editable: false, type: 'collection', + group: gettext('Unique'), mode: ['edit', 'create'], + canEdit: true, canDelete: true, + control: 'unique-col-collection', + columns : ['name', 'columns'], + canAdd: true, + canAddRow: function(m) { + // User can only add if there is at least one column with name. + var columns = m.get('columns'); + return _.some(columns.pluck('name')); + } + },{ + id: 'exclude_constraint', label: gettext('Exclude constraint'), + model: pgBrowser.Nodes['exclusion_constraint'].model, + subnode: pgBrowser.Nodes['exclusion_constraint'].model, + editable: false, type: 'collection', + group: gettext('Exclude'), mode: ['edit', 'create'], + canEdit: true, canDelete: true, + control: 'unique-col-collection', + columns : ['name', 'columns', 'constraint'], + canAdd: true, + canAddRow: function(m) { + // User can only add if there is at least one column with name. 
+ var columns = m.get('columns'); + return _.some(columns.pluck('name')); + } + }] + },*/{ + id: 'relhasoids', label:gettext('Has OIDs?'), cell: 'switch', + type: 'switch', mode: ['properties', 'create', 'edit'], + disabled: 'inSchema', group: gettext('Advanced') + },{ + id: 'relpersistence', label:gettext('Unlogged?'), cell: 'switch', + type: 'switch', mode: ['properties', 'create', 'edit'], + disabled: 'inSchemaWithModelCheck', + group: gettext('Advanced') + },{ + id: 'conname', label:gettext('Primary key'), cell: 'string', + type: 'text', mode: ['properties'], group: gettext('Advanced'), + disabled: 'inSchema' + },{ + id: 'reltuples', label:gettext('Rows (estimated)'), cell: 'string', + type: 'text', mode: ['properties'], group: gettext('Advanced'), + disabled: 'inSchema' + },{ + id: 'rows_cnt', label:gettext('Rows (counted)'), cell: 'string', + type: 'text', mode: ['properties'], group: gettext('Advanced'), + disabled: 'inSchema' + },{ + id: 'relhassubclass', label:gettext('Inherits tables?'), cell: 'switch', + type: 'switch', mode: ['properties'], group: gettext('Advanced'), + disabled: 'inSchema' + },{ + id: 'is_sys_table', label:gettext('System table?'), cell: 'switch', + type: 'switch', mode: ['properties'], + disabled: 'inSchema' + }], + validate: function(keys) { + var err = {}, + changedAttrs = this.changed, + msg = undefined, + name = this.get('name'), + schema = this.get('schema'), + relowner = this.get('relowner'); + + // If nothing to validate or VacuumSetting keys then + // return from here + if ( keys && (keys.length == 0 + || _.indexOf(keys, 'autovacuum_enabled') != -1 + || _.indexOf(keys, 'toast_autovacuum_enabled') != -1) ) { + return null; + } + + // Have to clear existing validation before initiating current state validation only + this.errorModel.clear(); + + if (_.isUndefined(name) || _.isNull(name) || + String(name).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Table name cannot be empty.'); + this.errorModel.set('name', msg); + 
return msg; + } else if (_.isUndefined(schema) || _.isNull(schema) || + String(schema).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Table schema cannot be empty.'); + this.errorModel.set('schema', msg); + return msg; + } else if (_.isUndefined(relowner) || _.isNull(relowner) || + String(relowner).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Table owner cannot be empty.'); + this.errorModel.set('relowner', msg); + return msg; + } + return null; + }, + // We will disable everything if we are under catalog node + inSchema: function() { + if(this.node_info && 'catalog' in this.node_info) + { + return true; + } + return false; + }, + isInheritedTable: function(m) { + if(!m.inSchema.apply(this, [m])) { + if( + (!_.isUndefined(m.get('coll_inherits')) && m.get('coll_inherits').length != 0) + || + (!_.isUndefined(m.get('typname')) && String(m.get('typname')).replace(/^\s+|\s+$/g, '') !== '') + ) { + // Either of_types or coll_inherits has value + return false; + } else { + return true; + } + } + return false; + }, + // Oftype is defined? 
+ checkInheritance: function(m) { + // coll_inherits || typname + if(!m.inSchema.apply(this, [m]) && + ( _.isUndefined(m.get('typname')) || + _.isNull(m.get('typname')) || + String(m.get('typname')).replace(/^\s+|\s+$/g, '') == '')) { + return false; + } + return true; + }, + // We will disable Like if ofType is defined + isLikeDisable: function(m) { + if(!m.inSchemaWithModelCheck.apply(this, [m]) && + ( _.isUndefined(m.get('typname')) || + _.isNull(m.get('typname')) || + String(m.get('typname')).replace(/^\s+|\s+$/g, '') == '')) { + return false; + } + return true; + }, + // Check for column grid when to Add + check_grid_add_condition: function(m) { + var enable_flag = true; + if(!m.inSchema.apply(this, [m])) { + // if of_type then disable add in grid + if (!_.isUndefined(m.get('typname')) && + !_.isNull(m.get('typname')) && + m.get('typname') !== '') { + enable_flag = false; + } + } + return enable_flag; + }, + // Check for column grid when to edit/delete (for each row) + check_grid_row_edit_delete: function(m) { + var flag = true; + if(!_.isUndefined(m.get('inheritedfrom')) && + !_.isNull(m.get('inheritedfrom')) && + String(m.get('inheritedfrom')).replace(/^\s+|\s+$/g, '') !== '') { + flag = false; + } + return flag; + }, + // We will disable it if Inheritance is defined + checkOfType: function(m) { + //coll_inherits || typname + if(!m.inSchemaWithModelCheck.apply(this, [m]) && + (_.isUndefined(m.get('coll_inherits')) || + _.isNull(m.get('coll_inherits')) || + String(m.get('coll_inherits')).replace(/^\s+|\s+$/g, '') == '')) { + return false; + } + return true; + }, + // We will check if we are under schema node & in 'create' mode + inSchemaWithModelCheck: function(m) { + if(this.node_info && 'schema' in this.node_info) + { + // We will disbale control if it's in 'edit' mode + if (m.isNew()) { + return false; + } else { + return true; + } + } + return true; + }, + isTableAutoVacuumEnable: function(m) { + // We need to check additional condition to toggle 
enable/disable + // for table auto-vacuum + if(!m.inSchema.apply(this, [m]) && + m.get('autovacuum_enabled') === true) { + return false; + } + return true; + }, + isToastTableAutoVacuumEnable: function(m) { + // We need to check additional condition to toggle enable/disable + // for toast table auto-vacuum + if(!m.inSchemaWithModelCheck.apply(this, [m]) && + m.get('toast_autovacuum_enabled') == true) { + return false; + } + return true; + }, + fetch_columns_ajax: function(arg) { + var self = this, + url = 'get_columns', + m = self.model.top || self.model, + old_columns = _.clone(m.get('columns')) + data = undefined, + node = this.field.get('schema_node'), + node_info = this.field.get('node_info'), + full_url = node.generate_url.apply( + node, [ + null, url, this.field.get('node_data'), + this.field.get('url_with_id') || false, node_info + ] + ), + cache_level = this.field.get('cache_level') || node.type, + cache_node = this.field.get('cache_node'); + + cache_node = (cache_node && pgBrowser.Nodes['cache_node']) || node; + + m.trigger('pgadmin:view:fetching', m, self.field); + // Fetching Columns data for the selected table. + $.ajax({ + async: false, + url: full_url, + data: arg, + success: function(res) { + data = cache_node.cache(url, node_info, cache_level, res.data); + }, + error: function() { + m.trigger('pgadmin:view:fetch:error', m, self.field); + } + }); + m.trigger('pgadmin:view:fetched', m, self.field); + data = (data && data.data) || []; + return data; + } + }), + canCreate: function(itemData, item, data) { + //If check is false then , we will allow create menu + if (data && data.check == false) + return true; + + var t = pgBrowser.tree, i = item, d = itemData; + // To iterate over tree to check parent node + while (i) { + // If it is schema then allow user to create table + if (_.indexOf(['schema'], d._type) > -1) + return true; + + if ('coll-table' == d._type) { + //Check if we are not child of catalog + prev_i = t.hasParent(i) ? 
t.parent(i) : null; + prev_d = prev_i ? t.itemData(prev_i) : null; + if( prev_d._type == 'catalog') { + return false; + } else { + return true; + } + } + i = t.hasParent(i) ? t.parent(i) : null; + d = i ? t.itemData(i) : null; + } + // by default we do not want to allow create menu + return true; + }, + // Check to whether table has disable trigger(s) + canCreate_with_trigger_enable: function(itemData, item, data) { + if(this.canCreate.apply(this, [itemData, item, data])) { + // We are here means we can create menu, now let's check condition + if(itemData.tigger_count > 0) { + return true; + } else { + return false; + } + } + }, + // Check to whether table has enable trigger(s) + canCreate_with_trigger_disable: function(itemData, item, data) { + if(this.canCreate.apply(this, [itemData, item, data])) { + // We are here means we can create menu, now let's check condition + if(itemData.tigger_count > 0 && itemData.has_enable_triggers > 0) { + return true; + } else { + return false; + } + } + } + }); + } + + return pgBrowser.Nodes['partition']; +}); diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/attach.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/attach.sql new file mode 100644 index 0000000..0661d9c --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/attach.sql @@ -0,0 +1,2 @@ +ALTER TABLE {{conn|qtIdent(data.parent_schema, data.partitioned_table_name)}} ATTACH PARTITION {{conn|qtIdent(data.schema, data.name)}} + {{ data.partition_value }}; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/backend_support.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/backend_support.sql new file mode 100644 index 0000000..5fa1d7e --- /dev/null +++ 
b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/backend_support.sql @@ -0,0 +1,9 @@ +{#=============Checks if it is partitioned table========#} +{% if tid %} +SELECT + CASE WHEN c.relkind = 'p' THEN True ELSE False END As ptable +FROM + pg_class c +WHERE + c.oid = {{ tid }}::oid +{% endif %} diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/create.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/create.sql new file mode 100644 index 0000000..e1f9607 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/create.sql @@ -0,0 +1,27 @@ +{% import 'table/sql/macros/constraints.macro' as CONSTRAINTS %} +{#===========================================#} +{#====== MAIN TABLE TEMPLATE STARTS HERE ======#} +{#===========================================#} +{### CREATE TABLE STATEMENT FOR partitions ###} +CREATE {% if data.relpersistence %}UNLOGGED {% endif %}TABLE {{conn|qtIdent(data.schema, data.name)}}{% if data.relispartition is defined and data.relispartition %} PARTITION OF {{conn|qtIdent(data.parent_schema, data.partitioned_table_name)}}{% endif %} + +{# Macro to render for constraints #} +{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 or data.exclude_constraint|length > 0 %} +( {% endif %} +{% if data.primary_key|length > 0 %}{{CONSTRAINTS.PRIMARY_KEY(conn, data.primary_key[0])}}{% endif %}{% if data.unique_constraint|length > 0 %}{% if data.primary_key|length > 0 %},{% endif %} +{{CONSTRAINTS.UNIQUE(conn, data.unique_constraint)}}{% endif %}{% if data.foreign_key|length > 0 %}{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 %},{% endif %} +{{CONSTRAINTS.FOREIGN_KEY(conn, data.foreign_key)}}{% endif %}{% if 
data.check_constraint|length > 0 %}{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 %},{% endif %} +{{CONSTRAINTS.CHECK(conn, data.check_constraint)}}{% endif %}{% if data.exclude_constraint|length > 0 %}{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 %},{% endif %} +{{CONSTRAINTS.EXCLUDE(conn, data.exclude_constraint)}}{% endif %} +{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 or data.exclude_constraint|length > 0 %} + +) +{% endif %} + {{ data.partition_value }}; + +{### Alter SQL for Owner ###} +{% if data.relowner %} + +ALTER TABLE {{conn|qtIdent(data.schema, data.name)}} + OWNER to {{conn|qtIdent(data.relowner)}}; +{% endif %} diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/detach.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/detach.sql new file mode 100644 index 0000000..4b413db --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/detach.sql @@ -0,0 +1 @@ +ALTER TABLE {{conn|qtIdent(data.parent_schema, data.partitioned_table_name)}} DETACH PARTITION {{conn|qtIdent(data.schema, data.name)}}; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/get_attach_tables.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/get_attach_tables.sql new file mode 100644 index 0000000..476dc3f --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/get_attach_tables.sql @@ -0,0 +1,23 @@ +SELECT oid, quote_ident(nspname)||'.'||quote_ident(relname) AS table_name FROM +(SELECT + r.oid, 
r.relname, n.nspname, array_agg(a.attname) attnames, array_agg(a.atttypid) atttypes +FROM + (SELECT oid, relname, relnamespace FROM pg_catalog.pg_class + WHERE relkind in ('r', 'p') AND NOT relispartition) r + JOIN (SELECT oid AS nspoid, nspname FROM + pg_catalog.pg_namespace WHERE nspname NOT LIKE E'pg\_%') n + ON (r.relnamespace = n.nspoid) + JOIN (SELECT attrelid, attname, atttypid FROM + pg_catalog.pg_attribute WHERE attnum > 0 ORDER BY attrelid, attnum) a + ON (r.oid = a.attrelid) +GROUP BY r.oid, r.relname, r.relnamespace, n.nspname) all_tables +JOIN +(SELECT + attrelid, array_agg(attname) attnames, array_agg(atttypid) atttypes +FROM + (SELECT * FROM pg_catalog.pg_attribute + WHERE attrelid = {{ tid }} AND attnum > 0 + ORDER BY attrelid, attnum) attributes +GROUP BY attrelid) current_table ON current_table.attrelid != all_tables.oid + AND current_table.attnames = all_tables.attnames + AND current_table.atttypes = all_tables.atttypes \ No newline at end of file diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/nodes.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/nodes.sql new file mode 100644 index 0000000..8967478 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/nodes.sql @@ -0,0 +1,14 @@ +SELECT rel.oid, rel.relname AS name, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE AND tgenabled = 'O') AS has_enable_triggers, + pg_get_expr(rel.relpartbound, rel.oid) AS partition_value, + rel.relnamespace AS schema_id, + nsp.nspname AS schema_name, + rel.relispartition AS is_partitioned +FROM + (SELECT * FROM pg_inherits WHERE inhparent = {{ tid }}::oid) inh + LEFT JOIN pg_class rel ON inh.inhrelid = rel.oid + LEFT JOIN pg_namespace nsp ON 
rel.relnamespace = nsp.oid + WHERE rel.relispartition + {% if ptid %} AND rel.oid = {{ ptid }}::OID {% endif %} + ORDER BY rel.relname; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/properties.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/properties.sql new file mode 100644 index 0000000..259ae82 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/properties.sql @@ -0,0 +1,80 @@ +SELECT rel.oid, rel.relname AS name, rel.reltablespace AS spcoid,rel.relacl AS relacl_str, + (CASE WHEN length(spc.spcname) > 0 THEN spc.spcname ELSE + (SELECT sp.spcname FROM pg_database dtb + JOIN pg_tablespace sp ON dtb.dattablespace=sp.oid + WHERE dtb.oid = {{ did }}::oid) + END) as spcname, + (select nspname FROM pg_namespace WHERE oid = {{scid}}::oid ) as parent_schema, + nsp.nspname as schema, + pg_get_userbyid(rel.relowner) AS relowner, rel.relhasoids, rel.relispartition, + rel.relhassubclass, rel.reltuples, des.description, con.conname, con.conkey, + EXISTS(select 1 FROM pg_trigger + JOIN pg_proc pt ON pt.oid=tgfoid AND pt.proname='logtrigger' + JOIN pg_proc pc ON pc.pronamespace=pt.pronamespace AND pc.proname='slonyversion' + WHERE tgrelid=rel.oid) AS isrepl, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount, + (SELECT ARRAY(SELECT CASE WHEN (nspname NOT LIKE E'pg\_%') THEN + quote_ident(nspname)||'.'||quote_ident(c.relname) + ELSE quote_ident(c.relname) END AS inherited_tables + FROM pg_inherits i + JOIN pg_class c ON c.oid = i.inhparent + JOIN pg_namespace n ON n.oid=c.relnamespace + WHERE i.inhrelid = rel.oid ORDER BY inhseqno)) AS coll_inherits, + (SELECT count(*) + FROM pg_inherits i + JOIN pg_class c ON c.oid = i.inhparent + JOIN pg_namespace n ON n.oid=c.relnamespace + WHERE i.inhrelid = rel.oid) AS inherited_tables_cnt, + (CASE WHEN 
rel.relpersistence = 'u' THEN true ELSE false END) AS relpersistence, + substring(array_to_string(rel.reloptions, ',') FROM 'fillfactor=([0-9]*)') AS fillfactor, + (CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true') + THEN true ELSE false END) AS autovacuum_enabled, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS autovacuum_vacuum_threshold, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_vacuum_scale_factor, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_threshold=([0-9]*)') AS autovacuum_analyze_threshold, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_analyze_scale_factor, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS autovacuum_vacuum_cost_delay, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS autovacuum_vacuum_cost_limit, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS autovacuum_freeze_min_age, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS autovacuum_freeze_max_age, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS autovacuum_freeze_table_age, + (CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true') + THEN true ELSE false END) AS toast_autovacuum_enabled, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS toast_autovacuum_vacuum_threshold, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_vacuum_scale_factor, + substring(array_to_string(tst.reloptions, ',') FROM 
'autovacuum_analyze_threshold=([0-9]*)') AS toast_autovacuum_analyze_threshold, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_analyze_scale_factor, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS toast_autovacuum_vacuum_cost_delay, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS toast_autovacuum_vacuum_cost_limit, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS toast_autovacuum_freeze_min_age, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS toast_autovacuum_freeze_max_age, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS toast_autovacuum_freeze_table_age, + array_to_string(rel.reloptions, ',') AS table_vacuum_settings_str, + array_to_string(tst.reloptions, ',') AS toast_table_vacuum_settings_str, + rel.reloptions AS reloptions, tst.reloptions AS toast_reloptions, rel.reloftype, typ.typname, + (CASE WHEN rel.reltoastrelid = 0 THEN false ELSE true END) AS hastoasttable, + -- Added for pgAdmin4 + (CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean THEN true ELSE false END) AS autovacuum_custom, + (CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean AND rel.reltoastrelid != 0 THEN true ELSE false END) AS toast_autovacuum, + + (SELECT array_agg(provider || '=' || label) FROM pg_seclabels sl1 WHERE sl1.objoid=rel.oid AND sl1.objsubid=0) AS seclabels, + (CASE WHEN rel.oid <= {{ datlastsysoid}}::oid THEN true ElSE false END) AS is_sys_table, + -- Added for partition table + {% if ptid %} + (CASE WHEN rel.relispartition THEN pg_get_expr(rel.relpartbound, {{ ptid }}::oid) ELSE '' END) AS partition_value, + (SELECT relname FROM pg_class WHERE oid = 
{{ tid }}::oid) AS partitioned_table_name + {% else %} + pg_get_expr(rel.relpartbound, rel.oid) AS partition_value + {% endif %} + +FROM pg_class rel + LEFT OUTER JOIN pg_tablespace spc on spc.oid=rel.reltablespace + LEFT OUTER JOIN pg_description des ON (des.objoid=rel.oid AND des.objsubid=0 AND des.classoid='pg_class'::regclass) + LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p' + LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid + LEFT JOIN pg_type typ ON rel.reloftype=typ.oid + LEFT JOIN pg_inherits inh ON inh.inhrelid = rel.oid + LEFT JOIN pg_namespace nsp ON rel.relnamespace = nsp.oid +WHERE rel.relispartition AND inh.inhparent = {{ tid }}::oid +{% if ptid %} AND rel.oid = {{ ptid }}::oid {% endif %} +ORDER BY rel.relname; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/js/table.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/js/table.js index 0b701a4..c751988 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/js/table.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/js/table.js @@ -5,6 +5,323 @@ define('pgadmin.node.table', [ 'pgadmin.node.constraints' ], function(gettext, url_for, $, _, S, pgAdmin, pgBrowser, alertify) { + var PartitionRow = Backgrid.Row.extend({ + modelDuplicateColor: "lightYellow", + + modelUniqueColor: "#fff", + + initialize: function () { + Backgrid.Row.prototype.initialize.apply(this, arguments); + var self = this; + self.model.on("change:is_attach", function() { + setTimeout(function() { + self.columns.each(function(col) { + if (col.get('name') == 'partition_name') { + var idx = self.columns.indexOf(col), + cf = col.get("cellFunction"), + cell = new (cf.apply(col, [self.model]))({ + column: col, + model: self.model + }), + oldCell = self.cells[idx]; + oldCell.remove(); + self.cells[idx] = cell; + self.render(); + } + }); + 
}, 10); + }); + self.listenTo(self.model, 'pgadmin-session:model:duplicate', self.modelDuplicate); + self.listenTo(self.model, 'pgadmin-session:model:unique', self.modelUnique); + }, + modelDuplicate: function() { + $(this.el).removeClass("new"); + this.el.style.backgroundColor = this.modelDuplicateColor; + }, + modelUnique: function() { + this.el.style.backgroundColor = this.modelUniqueColor; + } + }); + + var getPartitionCell = function(model) { + var is_attach = model.get("is_attach"); + if (is_attach) { + var options = []; + model.set({'partition_name': undefined}, {silent:true}); + _.each(model.top.table_options, function(t) { + options.push([t.label, t.value]); + }); + return Backgrid.Extension.Select2Cell.extend({optionValues: options}); + + } else { + return Backgrid.StringCell; + } + }; + + var PartitionKeyModel = pgBrowser.Node.Model.extend({ + defaults: { + key_type: 'column', + pt_column: undefined, + expression: undefined + }, + keys:['pt_column'], + schema: [{ + id: 'key_type', label:'Key type', type:'select2', editable: true, + cell:'select2', cellHeaderClasses: 'width_percent_25', + select2: {allowClear: false}, + options:[{ + label: 'Column', value: 'column' + },{ + label: 'Expression', value: 'expression' + }] + },{ + id: 'pt_column', label: gettext('Column'), type:'text', + cell: Backgrid.Extension.Select2DepCell.extend({ + keyPathAccessor: function(obj, path) { + var res = obj; + if(_.isArray(res)) { + return _.map(res, function(o) { return o['pt_column'] + }); + } + path = path.split('.'); + for (var i = 0; i < path.length; i++) { + if (_.isNull(res)) return null; + if (_.isEmpty(path[i])) continue; + if (!_.isUndefined(res[path[i]])) res = res[path[i]]; + } + return _.isObject(res) && !_.isArray(res) ? 
null : res; + }, + initialize: function() { + // Here we will decide if we need to call URL + // Or fetch the data from parent columns collection + var self = this; + if(this.model.handler) { + Backgrid.Extension.Select2DepCell.prototype.initialize.apply(this, arguments); + // Do not listen for any event(s) for existing constraint. + if (_.isUndefined(self.model.get('oid'))) { + var tableCols = self.model.top.get('columns'); + self.listenTo(tableCols, 'remove' , self.resetColOptions); + self.listenTo(tableCols, 'change:name', self.resetColOptions); + } + + self.custom_options(); + } + }, + resetColOptions: function(m) { + var self = this; + + setTimeout(function () { + self.custom_options(); + self.render.apply(self); + }, 50); + }, + custom_options: function() { + // We will add all the columns entered by user in table model + var columns = this.model.top.get('columns'), + added_columns_from_tables = []; + + if (columns.length > 0) { + _.each(columns.models, function(m) { + var col = m.get('name'); + if(!_.isUndefined(col) && !_.isNull(col)) { + added_columns_from_tables.push( + {label: col, value: col, image:'icon-column'} + ); + } + }); + } + // Set the values in to options so that user can select + this.column.set('options', added_columns_from_tables); + }, + remove: function() { + if(this.model.handler) { + var self = this, + tableCols = self.model.top.get('columns'); + self.stopListening(tableCols, 'remove' , self.resetColOptions); + self.stopListening(tableCols, 'change:name' , self.resetColOptions); + Backgrid.Extension.Select2DepCell.prototype.remove.apply(this, arguments); + } + } + }), + deps: ['key_type'], + cellHeaderClasses: 'width_percent_30', + transform : function(data){ + var res = []; + if (data && _.isArray(data)) { + _.each(data, function(d) { + res.push({label: d.label, value: d.label, image:'icon-column'}); + }) + } + return res; + }, + select2:{allowClear:false}, + editable: function(m) { + if (m.get('key_type') == 'expression') { + 
setTimeout( function() { + m.set('pt_column', undefined); + }, 10); + return false; + } + return true; + } + },{ + id: 'expression', label:'Expression', type:'text', + cell:Backgrid.Extension.StringDepCell, + cellHeaderClasses: 'width_percent_45', + deps: ['key_type'], + editable: function(m) { + if (m.get('key_type') == 'column') { + setTimeout( function() { + m.set('expression', undefined); + }, 10); + return false; + } + return true; + } + } + ], + validate: function(keys) { + var col_type = this.get('key_type'), + pt_column = this.get('pt_column'), + expression = this.get('expression'); + + // Have to clear existing validation before initiating current state + // validation only + this.errorModel.clear(); + + if (_.isUndefined(col_type) || _.isNull(col_type) || + String(col_type).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Partition key type cannot be empty.'); + this.errorModel.set('key_type', msg); + return msg; + } + else if (col_type == 'column' && + _.isUndefined(pt_column) || _.isNull(pt_column) || + String(pt_column).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Partition key column cannot be empty.'); + this.errorModel.set('pt_column', msg); + return msg; + } + else if (col_type == 'expression' && + _.isUndefined(expression) || _.isNull(expression) || + String(expression).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Partition key expression cannot be empty.'); + this.errorModel.set('expression', msg); + return msg; + } + + return null; + } + }); + + var PartitionsModel = pgBrowser.Node.Model.extend({ + defaults: { + oid: undefined, + is_attach: false, + partition_name: undefined, + values_from: undefined, + values_to: undefined, + values_in: undefined + }, + keys:['partition_name'], + schema: [{ + id: 'oid', label: gettext('OID'), type: 'text' + },{ + id: 'is_attach', label:gettext('Mode'), cell: 'switch', + type: 'switch', options: { 'onText': 'Attach', 'offText': 'Create'}, + cellHeaderClasses: 'width_percent_5', + editable: 
function(m) { + if (m instanceof Backbone.Model && m.isNew() && !m.top.isNew()) + return true; + return false; + } + },{ + id: 'partition_name', label: gettext('Name'), type: 'text', cell:'string', + cellHeaderClasses: 'width_percent_25', + editable: function(m) { + if (m instanceof Backbone.Model && m.isNew()) + return true; + return false; + }, cellFunction: getPartitionCell + },{ + id: 'values_from', label:'From', type:'text', + cell:Backgrid.Extension.StringDepCell, + cellHeaderClasses: 'width_percent_20', + editable: function(m) { + if(m.handler && m.handler.top && + m.handler.top.attributes && + m.handler.top.attributes.partition_type == 'range' && + m instanceof Backbone.Model && m.isNew()) + return true; + return false; + } + },{ + id: 'values_to', label:'To', type:'text', + cell:Backgrid.Extension.StringDepCell, + cellHeaderClasses: 'width_percent_20', + editable: function(m) { + if(m.handler && m.handler.top && + m.handler.top.attributes && + m.handler.top.attributes.partition_type == 'range' && + m instanceof Backbone.Model && m.isNew()) + return true; + return false; + } + },{ + id: 'values_in', label:'In', type:'text', + cell:Backgrid.Extension.StringDepCell, + cellHeaderClasses: 'width_percent_25', + editable: function(m) { + if(m.handler && m.handler.top && + m.handler.top.attributes && + m.handler.top.attributes.partition_type == 'list' && + m instanceof Backbone.Model && m.isNew()) + return true; + return false; + } + }], + validate: function(keys) { + var partition_name = this.get('partition_name'), + values_from = this.get('values_from'), + values_to = this.get('values_to'), + values_in = this.get('values_in'); + + // Have to clear existing validation before initiating current state + // validation only + this.errorModel.clear(); + + if (_.isUndefined(partition_name) || _.isNull(partition_name) || + String(partition_name).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Partition name cannot be empty.'); + this.errorModel.set('partition_name', 
msg); + return msg; + } + + if (this.top.get('partition_type') == 'range') { + if (_.isUndefined(values_from) || _.isNull(values_from) || + String(values_from).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('For range partition From field cannot be empty.'); + this.errorModel.set('values_from', msg); + return msg; + } else if (_.isUndefined(values_to) || _.isNull(values_to) || + String(values_to).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('For range partition To field cannot be empty.'); + this.errorModel.set('values_to', msg); + return msg; + } + } else if (this.top.get('partition_type') == 'list') { + if (_.isUndefined(values_in) || _.isNull(values_in) || + String(values_in).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('For list partition In field cannot be empty.'); + this.errorModel.set('values_in', msg); + return msg; + } + } + + return null; + } + }); + if (!pgBrowser.Nodes['coll-table']) { var databases = pgBrowser.Nodes['coll-table'] = pgBrowser.Collection.extend({ @@ -277,7 +594,10 @@ define('pgadmin.node.table', [ hastoasttable: true, toast_autovacuum_enabled: false, autovacuum_enabled: false, - primary_key: [] + primary_key: [], + partitions: [], + partition_type: 'range', + is_partitioned: false }, // Default values! 
initialize: function(attrs, args) { @@ -323,12 +643,37 @@ define('pgadmin.node.table', [ return (!(d && d.label.match(/pg_global/))) } },{ + id: 'partition', type: 'group', label: gettext('Partition'), + mode: ['edit', 'create'], min_version: 100000, + visible: function(m) { + // Always show in case of create mode + if (m.isNew() || m.get('is_partitioned')) + return true; + return false; + } + },{ + id: 'is_partitioned', label:gettext('Partitioned Table?'), cell: 'switch', + type: 'switch', mode: ['properties', 'create', 'edit'], + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (!m.isNew()) + return true; + return false; + } + },{ id: 'description', label: gettext('Comment'), type: 'multiline', mode: ['properties', 'create', 'edit'], disabled: 'inSchema' },{ id: 'coll_inherits', label: gettext('Inherited from table(s)'), url: 'get_inherits', type: 'array', group: gettext('Columns'), - disabled: 'checkInheritance', deps: ['typname'], + disabled: 'checkInheritance', deps: ['typname', 'is_partitioned'], mode: ['create', 'edit'], select2: { multiple: true, allowClear: true, placeholder: gettext('Select to inherit from...')}, @@ -425,7 +770,26 @@ define('pgadmin.node.table', [ model: pgBrowser.Nodes['column'].model, subnode: pgBrowser.Nodes['column'].model, mode: ['create', 'edit'], - disabled: 'inSchema', deps: ['typname'], + disabled: function(m) { + // In case of partitioned table remove inherited columns + if (m.isNew() && m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('columns'); + coll.remove(coll.filter(function(model) { + if (_.isUndefined(model.get('inheritedfrom'))) + return false; + return true; + })); + }, 10); + } + + if(this.node_info && 'catalog' in this.node_info) + { + return true; + } + return false; + }, + deps: ['typname', 
'is_partitioned'], canAdd: 'check_grid_add_condition', canEdit: true, canDelete: true, // For each row edit/delete button enable/disable @@ -528,10 +892,22 @@ define('pgadmin.node.table', [ subnode: pgBrowser.Nodes['primary_key'].model, editable: false, type: 'collection', group: gettext('Primary Key'), mode: ['edit', 'create'], - canEdit: true, canDelete: true, + canEdit: true, canDelete: true, deps:['is_partitioned'], control: 'unique-col-collection', columns : ['name', 'columns'], - canAdd: true, + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('primary_key'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, canAddRow: function(m) { // User can only add one primary key var columns = m.get('columns'); @@ -546,9 +922,21 @@ define('pgadmin.node.table', [ subnode: pgBrowser.Nodes['foreign_key'].model, editable: false, type: 'collection', group: gettext('Foreign Key'), mode: ['edit', 'create'], - canEdit: true, canDelete: true, + canEdit: true, canDelete: true, deps:['is_partitioned'], control: 'unique-col-collection', - canAdd: true, + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('foreign_key'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, columns : ['name', 'columns'], canAddRow: function(m) { // User can only add if there is at least one column with name. 
@@ -561,7 +949,7 @@ define('pgadmin.node.table', [ subnode: pgBrowser.Nodes['check_constraints'].model, editable: false, type: 'collection', group: gettext('Check'), mode: ['edit', 'create'], - canEdit: true, canDelete: true, + canEdit: true, canDelete: true, deps:['is_partitioned'], control: 'unique-col-collection', canAdd: true, columns : ['name', 'consrc'] @@ -571,10 +959,22 @@ define('pgadmin.node.table', [ subnode: pgBrowser.Nodes['unique_constraint'].model, editable: false, type: 'collection', group: gettext('Unique'), mode: ['edit', 'create'], - canEdit: true, canDelete: true, + canEdit: true, canDelete: true, deps:['is_partitioned'], control: 'unique-col-collection', columns : ['name', 'columns'], - canAdd: true, + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('unique_constraint'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, canAddRow: function(m) { // User can only add if there is at least one column with name. var columns = m.get('columns'); @@ -586,10 +986,22 @@ define('pgadmin.node.table', [ subnode: pgBrowser.Nodes['exclusion_constraint'].model, editable: false, type: 'collection', group: gettext('Exclude'), mode: ['edit', 'create'], - canEdit: true, canDelete: true, + canEdit: true, canDelete: true, deps:['is_partitioned'], control: 'unique-col-collection', columns : ['name', 'columns', 'constraint'], - canAdd: true, + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('exclude_constraint'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, canAddRow: function(m) { // User can only add if there is at least one column with name. 
var columns = m.get('columns'); @@ -600,7 +1012,7 @@ define('pgadmin.node.table', [ id: 'typname', label: gettext('Of type'), type: 'text', control: 'node-ajax-options', mode: ['properties', 'create', 'edit'], disabled: 'checkOfType', url: 'get_oftype', group: gettext('Advanced'), - deps: ['coll_inherits'], transform: function(data, cell) { + deps: ['coll_inherits', 'is_partitioned'], transform: function(data, cell) { var control = cell || this, m = control.model; m.of_types_tables = data; @@ -704,10 +1116,180 @@ define('pgadmin.node.table', [ disabled: 'isLikeDisable', group: gettext('Like') }] },{ + id: 'partition_type', label:gettext('Partition Type'), + editable: false, type: 'select2', select2: {allowClear: false}, + group: 'partition', deps: ['is_partitioned'], + options:[{ + label: 'Range', value: 'range' + },{ + label: 'List', value: 'list' + }], + mode:['create'], + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (!m.isNew() || !m.get('is_partitioned')) + return true; + return false; + } + },{ + id: 'partition_keys', label:gettext('Partition Keys'), + model: PartitionKeyModel, + subnode: PartitionKeyModel, + editable: true, type: 'collection', + group: 'partition', mode: ['create'], + deps: ['is_partitioned', 'partition_type'], + canEdit: false, canDelete: true, + control: 'sub-node-collection', + canAdd: function(m) { + if (m.isNew() && m.get('is_partitioned')) + return true; + return false; + }, + canAddRow: function(m) { + var columns = m.get('columns'); + var max_row_count = 1000; + + if (m.get('partition_type') && m.get('partition_type') == 'list') + max_row_count = 1; + + return (m.get('partition_keys') && + m.get('partition_keys').length < max_row_count && + _.some(columns.pluck('name')) + ); + }, + visible: function(m) { + 
if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (m.get('partition_keys') && m.get('partition_keys').models.length > 0) { + setTimeout(function () { + var coll = m.get('partition_keys'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + } + } + },{ + id: 'partition_scheme', label: gettext('Partition Scheme'), + type: 'note', group: 'partition', mode: ['edit'], + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (!m.isNew()) { + this.text = m.get('partition_scheme'); + } + } + },{ + id: 'partitions', label:gettext('Partitions'), + model: PartitionsModel, + subnode: PartitionsModel, + editable: true, type: 'collection', + group: 'partition', mode: ['edit', 'create'], + deps: ['is_partitioned', 'partition_type'], + canEdit: false, canDelete: true, + customDeleteTitle: gettext('Detach Partition'), + customDeleteMsg: gettext('Are you sure you wish to detach this partition?'), + columns:['is_attach', 'partition_name', 'values_from', 'values_to', 'values_in'], + control: Backform.SubNodeCollectionControl.extend({ + row:PartitionRow, + initialize: function() { + Backform.SubNodeCollectionControl.prototype.initialize.apply(this, arguments); + var self = this; + if (!this.model.isNew()) { + var node = this.field.get('schema_node'), + node_info = this.field.get('node_info'); + + // Make ajax call to get the tables to be attached + $.ajax({ + url: node.generate_url.apply( + node, [ + null, 'get_attach_tables', this.field.get('node_data'), + true, node_info + ]), + + type: 'GET', + async: false, + success: function(res) { + if (res.success == 1) { + self.model.table_options 
= res.data; + } + else { + alertify.alert( + 'Error fetching tables to be attached', res.data.result + ); + } + }, + error: function(e) { + var errmsg = $.parseJSON(e.responseText); + alertify.alert('Error fetching tables to be attached.', errmsg.errormsg); + } + }); + } + } + } + ), + canAdd: function(m) { + if (m.get('is_partitioned')) + return true; + return false; + }, + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (m.isNew() && m.get('partitions') && m.get('partitions').models.length > 0) { + setTimeout(function () { + var coll = m.get('partitions'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + } + } + },{ + id: 'partition_note', label: gettext('Partition'), + type: 'note', group: 'partition', + text: gettext('Above control will be used to Create/Attach/Detach partitions.
' + + '
  • Create Mode: User will be able to create N number of partitions. Mode switch control is disabled in this scenario.
  • ' + + '
  • Edit Mode: User will be able to create/attach/detach N number of partitions.' + + 'In attach mode there will be list of suitable tables to be attached.
'), + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + } + },{ // Here - we will create tab control for storage parameters // (auto vacuum). type: 'nested', control: 'tab', group: gettext('Parameter'), - mode: ['edit', 'create'], + mode: ['edit', 'create'], deps: ['is_partitioned'], schema: Backform.VacuumSettingsSchema },{ id: 'relacl_str', label: gettext('Privileges'), disabled: 'inSchema', @@ -734,7 +1316,9 @@ define('pgadmin.node.table', [ msg = undefined, name = this.get('name'), schema = this.get('schema'), - relowner = this.get('relowner'); + relowner = this.get('relowner'), + is_partitioned = this.get('is_partitioned'), + partition_keys = this.get('partition_keys'); // If nothing to validate or VacuumSetting keys then // return from here @@ -762,6 +1346,12 @@ define('pgadmin.node.table', [ msg = gettext('Table owner cannot be empty.'); this.errorModel.set('relowner', msg); return msg; + } else if (is_partitioned && this.isNew() && + !_.isNull(partition_keys) && partition_keys.length <= 0) + { + msg = gettext('Please specify at least one key for partitioned table.'); + this.errorModel.set('partition_keys', msg); + return msg; } return null; }, @@ -790,7 +1380,15 @@ define('pgadmin.node.table', [ }, // Oftype is defined? 
checkInheritance: function(m) { - // coll_inherits || typname + // Disabled if it is partitioned table + if (m.get('is_partitioned')) { + setTimeout( function() { + m.set('coll_inherits', []); + }, 10); + return true; + } + + // coll_inherits || typname if(!m.inSchema.apply(this, [m]) && ( _.isUndefined(m.get('typname')) || _.isNull(m.get('typname')) || @@ -834,7 +1432,15 @@ define('pgadmin.node.table', [ }, // We will disable it if Inheritance is defined checkOfType: function(m) { - //coll_inherits || typname + // Disabled if it is partitioned table + if (m.get('is_partitioned')) { + setTimeout( function() { + m.set('typname', undefined); + }, 10); + return true; + } + + //coll_inherits || typname if(!m.inSchemaWithModelCheck.apply(this, [m]) && (_.isUndefined(m.get('coll_inherits')) || _.isNull(m.get('coll_inherits')) || diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/acl.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/acl.sql new file mode 100644 index 0000000..eadf9d5 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/acl.sql @@ -0,0 +1,46 @@ +{### SQL to fetch privileges for tablespace ###} +SELECT 'relacl' as deftype, COALESCE(gt.rolname, 'PUBLIC') grantee, g.rolname grantor, + array_agg(privilege_type) as privileges, array_agg(is_grantable) as grantable +FROM + (SELECT + d.grantee, d.grantor, d.is_grantable, + CASE d.privilege_type + WHEN 'CONNECT' THEN 'c' + WHEN 'CREATE' THEN 'C' + WHEN 'DELETE' THEN 'd' + WHEN 'EXECUTE' THEN 'X' + WHEN 'INSERT' THEN 'a' + WHEN 'REFERENCES' THEN 'x' + WHEN 'SELECT' THEN 'r' + WHEN 'TEMPORARY' THEN 'T' + WHEN 'TRIGGER' THEN 't' + WHEN 'TRUNCATE' THEN 'D' + WHEN 'UPDATE' THEN 'w' + WHEN 'USAGE' THEN 'U' + ELSE 'UNKNOWN' + END AS privilege_type + FROM + (SELECT rel.relacl + FROM pg_class rel + LEFT OUTER JOIN pg_tablespace spc on 
spc.oid=rel.reltablespace + LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p' + LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid + LEFT JOIN pg_type typ ON rel.reloftype=typ.oid + WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid + AND rel.oid = {{ tid }}::oid + ) acl, + (SELECT (d).grantee AS grantee, (d).grantor AS grantor, (d).is_grantable + AS is_grantable, (d).privilege_type AS privilege_type FROM (SELECT + aclexplode(rel.relacl) as d + FROM pg_class rel + LEFT OUTER JOIN pg_tablespace spc on spc.oid=rel.reltablespace + LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p' + LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid + LEFT JOIN pg_type typ ON rel.reloftype=typ.oid + WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid + AND rel.oid = {{ tid }}::oid + ) a) d + ) d + LEFT JOIN pg_catalog.pg_roles g ON (d.grantor = g.oid) + LEFT JOIN pg_catalog.pg_roles gt ON (d.grantee = gt.oid) +GROUP BY g.rolname, gt.rolname diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_inherits.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_inherits.sql new file mode 100644 index 0000000..f28f298 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_inherits.sql @@ -0,0 +1,17 @@ +{% import 'table/sql/macros/db_catalogs.macro' as CATALOG %} +SELECT c.oid, c.relname , nspname, +CASE WHEN nspname NOT LIKE E'pg\_%' THEN + quote_ident(nspname)||'.'||quote_ident(c.relname) +ELSE quote_ident(c.relname) +END AS inherits +FROM pg_class c +JOIN pg_namespace n +ON n.oid=c.relnamespace +WHERE relkind='r' AND NOT relispartition +{% if not show_system_objects %} +{{ CATALOG.VALID_CATALOGS(server_type) }} +{% endif %} +{% if tid %} +AND c.oid != tid +{% endif %} +ORDER BY relnamespace, 
c.relname \ No newline at end of file diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_oid.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_oid.sql new file mode 100644 index 0000000..f630698 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_oid.sql @@ -0,0 +1,5 @@ +SELECT rel.oid as tid +FROM pg_class rel +WHERE rel.relkind IN ('r','s','t','p') +AND rel.relnamespace = {{ scid }}::oid +AND rel.relname = {{data.name|qtLiteral}} \ No newline at end of file diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_table.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_table.sql new file mode 100644 index 0000000..6952c8d --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_table.sql @@ -0,0 +1,8 @@ +SELECT + rel.relname AS name +FROM + pg_class rel +WHERE + rel.relkind IN ('r','s','t','p') + AND rel.relnamespace = {{ scid }}::oid + AND rel.oid = {{ tid }}::oid; \ No newline at end of file diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/nodes.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/nodes.sql new file mode 100644 index 0000000..fbf9c42 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/nodes.sql @@ -0,0 +1,9 @@ +SELECT rel.oid, rel.relname AS name, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE AND tgenabled = 'O') AS has_enable_triggers, + (CASE WHEN rel.relkind = 'p' THEN true ELSE false END) AS 
is_partitioned +FROM pg_class rel + WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid + AND NOT rel.relispartition + {% if tid %} AND rel.oid = {{tid}}::OID {% endif %} + ORDER BY rel.relname; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/properties.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/properties.sql new file mode 100644 index 0000000..1d589df --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/properties.sql @@ -0,0 +1,73 @@ +SELECT rel.oid, rel.relname AS name, rel.reltablespace AS spcoid,rel.relacl AS relacl_str, + (CASE WHEN length(spc.spcname) > 0 THEN spc.spcname ELSE + (SELECT sp.spcname FROM pg_database dtb + JOIN pg_tablespace sp ON dtb.dattablespace=sp.oid + WHERE dtb.oid = {{ did }}::oid) + END) as spcname, + (select nspname FROM pg_namespace WHERE oid = {{scid}}::oid ) as schema, + pg_get_userbyid(rel.relowner) AS relowner, rel.relhasoids, rel.relkind, + (CASE WHEN rel.relkind = 'p' THEN true ELSE false END) AS is_partitioned, + rel.relhassubclass, rel.reltuples, des.description, con.conname, con.conkey, + EXISTS(select 1 FROM pg_trigger + JOIN pg_proc pt ON pt.oid=tgfoid AND pt.proname='logtrigger' + JOIN pg_proc pc ON pc.pronamespace=pt.pronamespace AND pc.proname='slonyversion' + WHERE tgrelid=rel.oid) AS isrepl, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount, + (SELECT ARRAY(SELECT CASE WHEN (nspname NOT LIKE E'pg\_%') THEN + quote_ident(nspname)||'.'||quote_ident(c.relname) + ELSE quote_ident(c.relname) END AS inherited_tables + FROM pg_inherits i + JOIN pg_class c ON c.oid = i.inhparent + JOIN pg_namespace n ON n.oid=c.relnamespace + WHERE i.inhrelid = rel.oid ORDER BY inhseqno)) AS coll_inherits, + (SELECT count(*) + FROM pg_inherits i + JOIN pg_class c ON c.oid = i.inhparent + JOIN 
pg_namespace n ON n.oid=c.relnamespace + WHERE i.inhrelid = rel.oid) AS inherited_tables_cnt, + (CASE WHEN rel.relpersistence = 'u' THEN true ELSE false END) AS relpersistence, + substring(array_to_string(rel.reloptions, ',') FROM 'fillfactor=([0-9]*)') AS fillfactor, + (CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true') + THEN true ELSE false END) AS autovacuum_enabled, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS autovacuum_vacuum_threshold, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_vacuum_scale_factor, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_threshold=([0-9]*)') AS autovacuum_analyze_threshold, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_analyze_scale_factor, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS autovacuum_vacuum_cost_delay, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS autovacuum_vacuum_cost_limit, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS autovacuum_freeze_min_age, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS autovacuum_freeze_max_age, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS autovacuum_freeze_table_age, + (CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true') + THEN true ELSE false END) AS toast_autovacuum_enabled, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS toast_autovacuum_vacuum_threshold, + substring(array_to_string(tst.reloptions, ',') FROM 
'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_vacuum_scale_factor, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_analyze_threshold=([0-9]*)') AS toast_autovacuum_analyze_threshold, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_analyze_scale_factor, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS toast_autovacuum_vacuum_cost_delay, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS toast_autovacuum_vacuum_cost_limit, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS toast_autovacuum_freeze_min_age, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS toast_autovacuum_freeze_max_age, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS toast_autovacuum_freeze_table_age, + array_to_string(rel.reloptions, ',') AS table_vacuum_settings_str, + array_to_string(tst.reloptions, ',') AS toast_table_vacuum_settings_str, + rel.reloptions AS reloptions, tst.reloptions AS toast_reloptions, rel.reloftype, typ.typname, + (CASE WHEN rel.reltoastrelid = 0 THEN false ELSE true END) AS hastoasttable, + -- Added for pgAdmin4 + (CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean THEN true ELSE false END) AS autovacuum_custom, + (CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean AND rel.reltoastrelid != 0 THEN true ELSE false END) AS toast_autovacuum, + + (SELECT array_agg(provider || '=' || label) FROM pg_seclabels sl1 WHERE sl1.objoid=rel.oid AND sl1.objsubid=0) AS seclabels, + (CASE WHEN rel.oid <= {{ datlastsysoid}}::oid THEN true ElSE false END) AS is_sys_table + -- Added for partition table + {% if tid %}, (CASE WHEN 
rel.relkind = 'p' THEN pg_get_partkeydef({{ tid }}::oid) ELSE '' END) AS partition_scheme {% endif %} +FROM pg_class rel + LEFT OUTER JOIN pg_tablespace spc on spc.oid=rel.reltablespace + LEFT OUTER JOIN pg_description des ON (des.objoid=rel.oid AND des.objsubid=0 AND des.classoid='pg_class'::regclass) + LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p' + LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid + LEFT JOIN pg_type typ ON rel.reloftype=typ.oid +WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid +AND NOT rel.relispartition +{% if tid %} AND rel.oid = {{ tid }}::oid {% endif %} +ORDER BY rel.relname; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/create.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/create.sql index f8e858f..5ec8ffc 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/create.sql +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/create.sql @@ -57,7 +57,8 @@ CREATE {% if data.relpersistence %}UNLOGGED {% endif %}TABLE {{conn|qtIdent(data {{CONSTRAINTS.EXCLUDE(conn, data.exclude_constraint)}}{% endif %} {% if data.like_relation or data.coll_inherits or data.columns|length > 0 or data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 or data.exclude_constraint|length > 0 %} -) +){% if data.relkind is defined and data.relkind == 'p' %} PARTITION BY {{ data.partition_scheme }} {% endif %} + {% endif %} {### If we are inheriting it from another table(s) ###} {% if data.coll_inherits %} diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/templates/trigger/js/trigger.js 
b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/templates/trigger/js/trigger.js index 7716b87..807a264 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/templates/trigger/js/trigger.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/templates/trigger/js/trigger.js @@ -201,6 +201,17 @@ define('pgadmin.node.trigger', [ mode: ['create','edit', 'properties'], deps: ['is_constraint_trigger'], disabled: function(m) { + // Disabled if table is a partitioned table. + if (_.has(m, 'node_info') && _.has(m.node_info, 'table') && + _.has(m.node_info.table, 'is_partitioned') && m.node_info.table.is_partitioned) + { + setTimeout(function(){ + m.set('is_row_trigger', false); + },10); + + return true; + } + // If constraint trigger is set to True then row trigger will // automatically set to True and becomes disable var is_constraint_trigger = m.get('is_constraint_trigger'); @@ -227,7 +238,19 @@ define('pgadmin.node.trigger', [ id: 'is_constraint_trigger', label: gettext('Constraint trigger?'), type: 'switch', disabled: 'inSchemaWithModelCheck', mode: ['create','edit', 'properties'], - group: gettext('Definition') + group: gettext('Definition'), + disabled: function(m) { + // Disabled if table is a partitioned table. 
+ if (_.has(m, 'node_info') && _.has(m.node_info, 'table') && + _.has(m.node_info.table, 'is_partitioned') && m.node_info.table.is_partitioned) + { + setTimeout(function(){ + m.set('is_constraint_trigger', false); + },10); + + return true; + } + } },{ id: 'tgdeferrable', label: gettext('Deferrable?'), type: 'switch', group: gettext('Definition'), diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/utils.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/utils.py new file mode 100644 index 0000000..be80ea6 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/utils.py @@ -0,0 +1,1080 @@ +########################################################################## +# +# pgAdmin 4 - PostgreSQL Tools +# +# Copyright (C) 2013 - 2017, The pgAdmin Development Team +# This software is released under the PostgreSQL Licence +# +########################################################################## + +""" Implements Utility class for Table and Partitioned Table. """ + +import re +from functools import wraps +from flask import render_template +from flask_babel import gettext +from pgadmin.utils.ajax import make_json_response, internal_server_error, \ + make_response as ajax_response, gone +from pgadmin.browser.server_groups.servers.databases.schemas.utils \ + import DataTypeReader, trigger_definition, parse_rule_definition +from pgadmin.browser.server_groups.servers.utils import parse_priv_from_db, \ + parse_priv_to_db +from pgadmin.browser.utils import PGChildNodeView +from pgadmin.utils.driver import get_driver +from config import PG_DEFAULT_DRIVER + + +class BaseTableView(PGChildNodeView): + """ + This class is base class for tables and partitioned tables. 
+ + Methods: + ------- + * check_precondition() + - This function will behave as a decorator which will checks + database connection before running view, it will also attaches + manager,conn & template_path properties to self + + * _formatter(data, tid) + - It will return formatted output of query result + as per client model format + + * _columns_formatter(tid, data): + - It will return formatted output of query result + as per client model format for column node + + * _index_constraints_formatter(self, did, tid, data): + - It will return formatted output of query result + as per client model format for index constraint node + + * _cltype_formatter(type): (staticmethod) + - We need to remove [] from type and append it + after length/precision so we will send flag for + sql template. + + * get_table_dependents(self, tid): + - This function get the dependents and return ajax response + for the table node. + + * get_table_dependencies(self, tid): + - This function get the dependencies and return ajax response + for the table node. + + * get_table_statistics(self, tid): + - Returns the statistics for a particular table if tid is specified, + otherwise it will return statistics for all the tables in that + schema. + * get_reverse_engineered_sql(self, did, scid, tid, main_sql, data): + - This function will creates reverse engineered sql for + the table object. + + * reset_statistics(self, scid, tid): + - This function will reset statistics of table. 
+ """ + @staticmethod + def check_precondition(f): + """ + This function will behave as a decorator which will checks + database connection before running view, it will also attaches + manager,conn & template_path properties to self + """ + + @wraps(f) + def wrap(*args, **kwargs): + # Here args[0] will hold self & kwargs will hold gid,sid,did + self = args[0] + driver = get_driver(PG_DEFAULT_DRIVER) + did = kwargs['did'] + self.manager = driver.connection_manager(kwargs['sid']) + self.conn = self.manager.connection(did=kwargs['did']) + self.qtIdent = driver.qtIdent + self.qtTypeIdent = driver.qtTypeIdent + # We need datlastsysoid to check if current table is system table + self.datlastsysoid = self.manager.db_info[ + did + ]['datlastsysoid'] if self.manager.db_info is not None and \ + did in self.manager.db_info else 0 + + ver = self.manager.version + # Set the template path for the SQL scripts + self.table_template_path = 'table/sql/#{0}#'.format(ver) + self.partition_template_path = 'partition/sql/#{0}#'.format(ver) + + # Template for Column ,check constraint and exclusion + # constraint node + self.column_template_path = 'column/sql/#{0}#'.format(ver) + self.check_constraint_template_path = \ + 'check_constraint/sql/#{0}#'.format(ver) + self.exclusion_constraint_template_path = \ + 'exclusion_constraint/sql/#{0}#'.format(ver) + + # Template for PK & Unique constraint node + self.index_constraint_template_path = 'index_constraint/sql' + + # Template for foreign key constraint node + self.foreign_key_template_path = 'foreign_key/sql/#{0}#'.format(ver) + + # Template for index node + self.index_template_path = 'index/sql/#{0}#'.format(ver) + + # Template for trigger node + self.trigger_template_path = 'trigger/sql/#{0}#'.format(ver) + + # Template for rules node + self.rules_template_path = 'rules/sql' + + # Supported ACL for table + self.acl = ['a', 'r', 'w', 'd', 'D', 'x', 't'] + + # Supported ACL for columns + self.column_acl = ['a', 'r', 'w', 'x'] + + return 
f(*args, **kwargs) + + return wrap + + def _columns_formatter(self, tid, data): + """ + Args: + tid: Table OID + data: dict of query result + + Returns: + It will return formatted output of query result + as per client model format for column node + """ + for column in data['columns']: + + # We need to format variables according to client js collection + if 'attoptions' in column and column['attoptions'] is not None: + spcoptions = [] + for spcoption in column['attoptions']: + k, v = spcoption.split('=') + spcoptions.append({'name': k, 'value': v}) + + column['attoptions'] = spcoptions + + # Need to format security labels according to client js collection + if 'seclabels' in column and column['seclabels'] is not None: + seclabels = [] + for seclbls in column['seclabels']: + k, v = seclbls.split('=') + seclabels.append({'provider': k, 'label': v}) + + column['seclabels'] = seclabels + + if 'attnum' in column and column['attnum'] is not None \ + and column['attnum'] > 0: + # We need to parse & convert ACL coming from database to + # json format + SQL = render_template("/".join( + [self.column_template_path, 'acl.sql']), + tid=tid, clid=column['attnum'] + ) + status, acl = self.conn.execute_dict(SQL) + + if not status: + return internal_server_error(errormsg=acl) + + # We will set get privileges from acl sql so we don't need + # it from properties sql + column['attacl'] = [] + + for row in acl['rows']: + priv = parse_priv_from_db(row) + column.setdefault(row['deftype'], []).append(priv) + + # we are receiving request when in edit mode + # we will send filtered types related to current type + present_type = column['cltype'] + + type_id = column['atttypid'] + + fulltype = self.get_full_type( + column['typnspname'], column['typname'], + column['isdup'], column['attndims'], column['atttypmod'] + ) + + length = False + precision = False + if 'elemoid' in column: + length, precision, typeval = \ + self.get_length_precision(column['elemoid']) + + # Set length and precision 
to None + column['attlen'] = None + column['attprecision'] = None + + # If we have length & precision both + if length and precision: + matchObj = re.search(r'(\d+),(\d+)', fulltype) + if matchObj: + column['attlen'] = matchObj.group(1) + column['attprecision'] = matchObj.group(2) + elif length: + # If we have length only + matchObj = re.search(r'(\d+)', fulltype) + if matchObj: + column['attlen'] = matchObj.group(1) + column['attprecision'] = None + + + SQL = render_template("/".join([self.column_template_path, + 'is_referenced.sql']), + tid=tid, clid=column['attnum']) + + status, is_reference = self.conn.execute_scalar(SQL) + + edit_types_list = list() + # We will need present type in edit mode + + if column['typnspname'] == "pg_catalog" \ + or column['typnspname'] == "public": + edit_types_list.append(present_type) + else: + t = self.qtTypeIdent(self.conn, column['typnspname'], + present_type) + edit_types_list.append(t) + column['cltype'] = t + + if int(is_reference) == 0: + SQL = render_template("/".join([self.column_template_path, + 'edit_mode_types.sql']), + type_id=type_id) + status, rset = self.conn.execute_2darray(SQL) + + for row in rset['rows']: + edit_types_list.append(row['typname']) + else: + edit_types_list.append(present_type) + + column['edit_types'] = edit_types_list + column['cltype'] = DataTypeReader.parse_type_name(column['cltype']) + + if 'indkey' in column: + # Current column + attnum = str(column['attnum']) + + # Single/List of primary key column(s) + indkey = str(column['indkey']) + + # We will check if column is in primary column(s) + if attnum in indkey.split(" "): + column['is_primary_key'] = True + else: + column['is_primary_key'] = False + + return data + + def _index_constraints_formatter(self, did, tid, data): + """ + Args: + tid: Table OID + data: dict of query result + + Returns: + It will return formatted output of query result + as per client model format for index constraint node + """ + + # We will fetch all the index 
constraints for the table + index_constraints = { + 'p': 'primary_key', 'u': 'unique_constraint' + } + + for ctype in index_constraints.keys(): + data[index_constraints[ctype]] = [] + + sql = render_template("/".join([self.index_constraint_template_path, + 'properties.sql']), + did=did, tid=tid, + constraint_type=ctype) + status, res = self.conn.execute_dict(sql) + + if not status: + return internal_server_error(errormsg=res) + + for row in res['rows']: + result = row + sql = render_template( + "/".join([self.index_constraint_template_path, + 'get_constraint_cols.sql']), + cid=row['oid'], + colcnt=row['indnatts']) + status, res = self.conn.execute_dict(sql) + + if not status: + return internal_server_error(errormsg=res) + + columns = [] + for r in res['rows']: + columns.append({"column": r['column'].strip('"')}) + + result['columns'] = columns + + # If not exists then create list and/or append into + # existing list [ Adding into main data dict] + data.setdefault(index_constraints[ctype], []).append(result) + + return data + + def _foreign_key_formatter(self, tid, data): + """ + Args: + tid: Table OID + data: dict of query result + + Returns: + It will return formatted output of query result + as per client model format for foreign key constraint node + """ + + # We will fetch all the index constraints for the table + sql = render_template("/".join([self.foreign_key_template_path, + 'properties.sql']), + tid=tid) + + status, result = self.conn.execute_dict(sql) + + if not status: + return internal_server_error(errormsg=result) + + for fk in result['rows']: + + sql = render_template("/".join([self.foreign_key_template_path, + 'get_constraint_cols.sql']), + tid=tid, + keys=zip(fk['confkey'], fk['conkey']), + confrelid=fk['confrelid']) + + status, res = self.conn.execute_dict(sql) + + if not status: + return internal_server_error(errormsg=res) + + columns = [] + cols = [] + for row in res['rows']: + columns.append({"local_column": row['conattname'], + "references": 
fk['confrelid'], + "referenced": row['confattname']}) + cols.append(row['conattname']) + + fk['columns'] = columns + + SQL = render_template("/".join([self.foreign_key_template_path, + 'get_parent.sql']), + tid=fk['columns'][0]['references']) + + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + fk['remote_schema'] = rset['rows'][0]['schema'] + fk['remote_table'] = rset['rows'][0]['table'] + + coveringindex = self.search_coveringindex(tid, cols) + + fk['coveringindex'] = coveringindex + if coveringindex: + fk['autoindex'] = True + fk['hasindex'] = True + else: + fk['autoindex'] = False + fk['hasindex'] = False + # If not exists then create list and/or append into + # existing list [ Adding into main data dict] + data.setdefault('foreign_key', []).append(fk) + + return data + + def _check_constraint_formatter(self, tid, data): + """ + Args: + tid: Table OID + data: dict of query result + + Returns: + It will return formatted output of query result + as per client model format for check constraint node + """ + + # We will fetch all the index constraints for the table + SQL = render_template("/".join([self.check_constraint_template_path, + 'properties.sql']), + tid=tid) + + status, res = self.conn.execute_dict(SQL) + + if not status: + return internal_server_error(errormsg=res) + # If not exists then create list and/or append into + # existing list [ Adding into main data dict] + + data['check_constraint'] = res['rows'] + + return data + + def _exclusion_constraint_formatter(self, did, tid, data): + """ + Args: + tid: Table OID + data: dict of query result + + Returns: + It will return formatted output of query result + as per client model format for exclusion constraint node + """ + + # We will fetch all the index constraints for the table + sql = render_template("/".join([self.exclusion_constraint_template_path, + 'properties.sql']), + did=did, tid=tid) + + status, result = self.conn.execute_dict(sql) + 
+ if not status: + return internal_server_error(errormsg=result) + + for ex in result['rows']: + + sql = render_template("/".join( + [self.exclusion_constraint_template_path, + 'get_constraint_cols.sql']), + cid=ex['oid'], + colcnt=ex['indnatts']) + + status, res = self.conn.execute_dict(sql) + + if not status: + return internal_server_error(errormsg=res) + + columns = [] + for row in res['rows']: + if row['options'] & 1: + order = False + nulls_order = True if (row['options'] & 2) else False + else: + order = True + nulls_order = True if (row['options'] & 2) else False + + columns.append({"column": row['coldef'].strip('"'), + "oper_class": row['opcname'], + "order": order, + "nulls_order": nulls_order, + "operator": row['oprname'], + "col_type": row['datatype'] + }) + + ex['columns'] = columns + # If not exists then create list and/or append into + # existing list [ Adding into main data dict] + data.setdefault('exclude_constraint', []).append(ex) + + return data + + def _formatter(self, did, scid, tid, data): + """ + Args: + data: dict of query result + scid: schema oid + tid: table oid + + Returns: + It will return formatted output of query result + as per client model format + """ + # Need to format security labels according to client js collection + if 'seclabels' in data and data['seclabels'] is not None: + seclabels = [] + for seclbls in data['seclabels']: + k, v = seclbls.split('=') + seclabels.append({'provider': k, 'label': v}) + + data['seclabels'] = seclabels + + # We need to parse & convert ACL coming from database to json format + SQL = render_template("/".join([self.table_template_path, 'acl.sql']), + tid=tid, scid=scid) + status, acl = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=acl) + + # We will set get privileges from acl sql so we don't need + # it from properties sql + for row in acl['rows']: + priv = parse_priv_from_db(row) + if row['deftype'] in data: + data[row['deftype']].append(priv) + else: + 
data[row['deftype']] = [priv] + + # We will add Auto vacuum defaults with out result for grid + data['vacuum_table'] = self.parse_vacuum_data(self.conn, data, 'table') + data['vacuum_toast'] = self.parse_vacuum_data(self.conn, data, 'toast') + + # Fetch columns for the table logic + # + # 1) Check if of_type and inherited tables are present? + # 2) If yes then Fetch all the columns for of_type and inherited tables + # 3) Add columns in columns collection + # 4) Find all the columns for tables and filter out columns which are + # not inherited from any table & format them one by one + + # Get of_type table columns and add it into columns dict + if data['typname']: + SQL = render_template("/".join([self.table_template_path, + 'get_columns_for_table.sql']), + tname=data['typname']) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + data['columns'] = res['rows'] + + # Get inherited table(s) columns and add it into columns dict + elif data['coll_inherits'] and len(data['coll_inherits']) > 0: + columns = [] + # Return all tables which can be inherited & do not show + # system columns + SQL = render_template("/".join([self.table_template_path, + 'get_inherits.sql']), + show_system_objects=False + ) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + for row in rset['rows']: + if row['inherits'] in data['coll_inherits']: + # Fetch columns using inherited table OID + SQL = render_template("/".join( + [self.table_template_path, + 'get_columns_for_table.sql']), + tid=row['oid'] + ) + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + columns.extend(res['rows'][:]) + data['columns'] = columns + + # We will fetch all the columns for the table using + # columns properties.sql, so we need to set template path + SQL = render_template("/".join([self.column_template_path, + 'properties.sql']), + tid=tid, + 
show_sys_objects=False + ) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + all_columns = res['rows'] + + # Filter inherited columns from all columns + if 'columns' in data and len(data['columns']) > 0 \ + and len(all_columns) > 0: + for row in data['columns']: + for i, col in enumerate(all_columns): + # If both name are same then remove it + # as it is inherited from other table + if col['name'] == row['name']: + # Remove same column from all_columns as + # already have it columns collection + del all_columns[i] + + # If any column is added then update columns collection + if len(all_columns) > 0: + data['columns'] += all_columns + # If no inherited columns found then add all columns + elif len(all_columns) > 0: + data['columns'] = all_columns + + if 'columns' in data and len(data['columns']) > 0: + data = self._columns_formatter(tid, data) + + # Here we will add constraint in our output + data = self._index_constraints_formatter(did, tid, data) + data = self._foreign_key_formatter(tid, data) + data = self._check_constraint_formatter(tid, data) + data = self._exclusion_constraint_formatter(did, tid, data) + + return data + + @staticmethod + def _cltype_formatter(data_type): + """ + + Args: + data_type: Type string + + Returns: + We need to remove [] from type and append it + after length/precision so we will send flag for + sql template + """ + if '[]' in data_type: + return data_type[:-2], True + else: + return data_type, False + + def get_table_dependents(self, tid): + """ + This function get the dependents and return ajax response + for the table node. 
+ + Args: + tid: Table ID + """ + # Specific condition for column which we need to append + where = "WHERE dep.refobjid={0}::OID".format(tid) + + dependents_result = self.get_dependents( + self.conn, tid + ) + + # Specific sql to run againt column to fetch dependents + SQL = render_template("/".join([self.table_template_path, + 'depend.sql']), where=where) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + for row in res['rows']: + ref_name = row['refname'] + if ref_name is None: + continue + + dep_type = '' + dep_str = row['deptype'] + if dep_str == 'a': + dep_type = 'auto' + elif dep_str == 'n': + dep_type = 'normal' + elif dep_str == 'i': + dep_type = 'internal' + + dependents_result.append({'type': 'sequence', 'name': ref_name, + 'field': dep_type}) + + return ajax_response( + response=dependents_result, + status=200 + ) + + def get_table_dependencies(self, tid): + """ + This function get the dependencies and return ajax response + for the table node. + + Args: + tid: Table ID + + """ + dependencies_result = self.get_dependencies( + self.conn, tid + ) + + return ajax_response( + response=dependencies_result, + status=200 + ) + + def get_table_statistics(self, scid, tid): + """ + Statistics + + Args: + scid: Schema Id + tid: Table Id + + Returns the statistics for a particular table if tid is specified, + otherwise it will return statistics for all the tables in that + schema. 
+ """ + + # Fetch schema name + status, schema_name = self.conn.execute_scalar( + render_template( + "/".join([self.table_template_path, 'get_schema.sql']), + conn=self.conn, scid=scid + ) + ) + if not status: + return internal_server_error(errormsg=schema_name) + + if tid is None: + status, res = self.conn.execute_dict( + render_template( + "/".join([self.table_template_path, + 'coll_table_stats.sql']), conn=self.conn, + schema_name=schema_name + ) + ) + else: + # For Individual table stats + + # Check if pgstattuple extension is already created? + # if created then only add extended stats + status, is_pgstattuple = self.conn.execute_scalar(""" + SELECT (count(extname) > 0) AS is_pgstattuple + FROM pg_extension + WHERE extname='pgstattuple' + """) + if not status: + return internal_server_error(errormsg=is_pgstattuple) + + # Fetch Table name + status, table_name = self.conn.execute_scalar( + render_template( + "/".join([self.table_template_path, 'get_table.sql']), + conn=self.conn, scid=scid, tid=tid + ) + ) + if not status: + return internal_server_error(errormsg=table_name) + + status, res = self.conn.execute_dict( + render_template( + "/".join([self.table_template_path, 'stats.sql']), + conn=self.conn, schema_name=schema_name, + table_name=table_name, + is_pgstattuple=is_pgstattuple, tid=tid + ) + ) + + if not status: + return internal_server_error(errormsg=res) + + return make_json_response( + data=res, + status=200 + ) + + def get_reverse_engineered_sql(self, did, scid, tid, main_sql, data): + """ + This function will creates reverse engineered sql for + the table object + + Args: + did: Database ID + scid: Schema ID + tid: Table ID + main_sql: List contains all the reversed engineered sql + data: Table's Data + """ + """ + ##################################### + # 1) Reverse engineered sql for TABLE + ##################################### + """ + + # Table & Schema declaration so that we can use them in child nodes + schema = data['schema'] + table = 
data['name'] + + data = self._formatter(did, scid, tid, data) + + # Now we have all lis of columns which we need + # to include in our create definition, Let's format them + if 'columns' in data: + for c in data['columns']: + if 'attacl' in c: + c['attacl'] = parse_priv_to_db(c['attacl'], self.column_acl) + + # check type for '[]' in it + if 'cltype' in c: + c['cltype'], c['hasSqrBracket'] = \ + self._cltype_formatter(c['cltype']) + + sql_header = u"-- Table: {0}\n\n-- ".format( + self.qtIdent(self.conn, data['schema'], data['name'])) + + sql_header += render_template("/".join([self.table_template_path, + 'delete.sql']), + data=data, conn=self.conn) + + sql_header = sql_header.strip('\n') + sql_header += '\n' + + # Add into main sql + main_sql.append(sql_header) + + # Parse privilege data + if 'relacl' in data: + data['relacl'] = parse_priv_to_db(data['relacl'], self.acl) + + # if table is partitions then + if 'relispartition' in data and data['relispartition']: + table_sql = render_template("/".join([self.partition_template_path, + 'create.sql']), + data=data, conn=self.conn) + else: + table_sql = render_template("/".join([self.table_template_path, + 'create.sql']), + data=data, conn=self.conn) + + # Add into main sql + table_sql = re.sub('\n{2,}', '\n\n', table_sql) + main_sql.append(table_sql.strip('\n')) + + """ + ###################################### + # 2) Reverse engineered sql for INDEX + ###################################### + """ + + SQL = render_template("/".join([self.index_template_path, + 'nodes.sql']), tid=tid) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + for row in rset['rows']: + + SQL = render_template("/".join([self.index_template_path, + 'properties.sql']), + did=did, tid=tid, idx=row['oid'], + datlastsysoid=self.datlastsysoid) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + data = dict(res['rows'][0]) + # 
Adding parent into data dict, will be using it while creating sql + data['schema'] = schema + data['table'] = table + # We also need to fecth columns of index + SQL = render_template("/".join([self.index_template_path, + 'column_details.sql']), + idx=row['oid']) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + # 'attdef' comes with quotes from query so we need to strip them + # 'options' we need true/false to render switch + # ASC(false)/DESC(true) + columns = [] + cols = [] + for col_row in rset['rows']: + # We need all data as collection for ColumnsModel + cols_data = { + 'colname': col_row['attdef'].strip('"'), + 'collspcname': col_row['collnspname'], + 'op_class': col_row['opcname'], + } + if col_row['options'][0] == 'DESC': + cols_data['sort_order'] = True + columns.append(cols_data) + + # We need same data as string to display in properties window + # If multiple column then separate it by colon + cols_str = col_row['attdef'] + if col_row['collnspname']: + cols_str += ' COLLATE ' + col_row['collnspname'] + if col_row['opcname']: + cols_str += ' ' + col_row['opcname'] + if col_row['options'][0] == 'DESC': + cols_str += ' DESC' + cols.append(cols_str) + + # Push as collection + data['columns'] = columns + # Push as string + data['cols'] = ', '.join(cols) + + sql_header = u"\n-- Index: {0}\n\n-- ".format(data['name']) + + sql_header += render_template("/".join([self.index_template_path, + 'delete.sql']), + data=data, conn=self.conn) + + index_sql = render_template("/".join([self.index_template_path, + 'create.sql']), + data=data, conn=self.conn) + index_sql += "\n" + index_sql += render_template("/".join([self.index_template_path, + 'alter.sql']), + data=data, conn=self.conn) + + # Add into main sql + index_sql = re.sub('\n{2,}', '\n\n', index_sql) + main_sql.append(sql_header + '\n\n' + index_sql.strip('\n')) + + """ + ######################################## + # 3) Reverse engineered sql for 
TRIGGERS + ######################################## + """ + SQL = render_template("/".join([self.trigger_template_path, + 'nodes.sql']), tid=tid) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + for row in rset['rows']: + trigger_sql = '' + + SQL = render_template("/".join([self.trigger_template_path, + 'properties.sql']), + tid=tid, trid=row['oid'], + datlastsysoid=self.datlastsysoid) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + data = dict(res['rows'][0]) + # Adding parent into data dict, will be using it while creating sql + data['schema'] = schema + data['table'] = table + + if data['tgnargs'] > 1: + # We know that trigger has more than 1 arguments, + # let's join them + data['tgargs'] = ', '.join(data['tgargs']) + + if len(data['tgattr']) > 1: + columns = ', '.join(data['tgattr'].split(' ')) + + SQL = render_template("/".join([self.trigger_template_path, + 'get_columns.sql']), + tid=tid, clist=columns) + + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + # 'tgattr' contains list of columns from table used in trigger + columns = [] + + for col_row in rset['rows']: + columns.append({'column': col_row['name']}) + + data['columns'] = columns + + data = trigger_definition(data) + + sql_header = u"\n-- Trigger: {0}\n\n-- ".format(data['name']) + + sql_header += render_template("/".join([self.trigger_template_path, + 'delete.sql']), + data=data, conn=self.conn) + + # If the request for new object which do not have did + trigger_sql = render_template("/".join([self.trigger_template_path, + 'create.sql']), + data=data, conn=self.conn) + + trigger_sql = sql_header + '\n\n' + trigger_sql.strip('\n') + + # If trigger is disabled then add sql code for the same + if not data['is_enable_trigger']: + trigger_sql += '\n\n' + trigger_sql += render_template("/".join([ + 
self.trigger_template_path, + 'enable_disable_trigger.sql']), + data=data, conn=self.conn) + + # Add into main sql + trigger_sql = re.sub('\n{2,}', '\n\n', trigger_sql) + main_sql.append(trigger_sql) + + """ + ##################################### + # 4) Reverse engineered sql for RULES + ##################################### + """ + + SQL = render_template("/".join( + [self.rules_template_path, 'properties.sql']), tid=tid) + + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + for row in rset['rows']: + rules_sql = '\n' + SQL = render_template("/".join( + [self.rules_template_path, 'properties.sql'] + ), rid=row['oid'], datlastsysoid=self.datlastsysoid) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + res_data = parse_rule_definition(res) + rules_sql += render_template("/".join( + [self.rules_template_path, 'create.sql']), + data=res_data, display_comments=True) + + # Add into main sql + rules_sql = re.sub('\n{2,}', '\n\n', rules_sql) + main_sql.append(rules_sql) + + """ + ########################################## + # 5) Reverse engineered sql for PARTITIONS + ########################################## + """ + if 'is_partitioned' in data and data['is_partitioned']: + SQL = render_template("/".join([self.partition_template_path, + 'nodes.sql']), + scid=scid, tid=tid) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + sql_header = u"\n-- Partitions SQL" + partition_sql = '' + for row in rset['rows']: + part_data = dict() + part_data['partitioned_table_name'] = data['name'] + part_data['parent_schema'] = data['schema'] + part_data['schema'] = row['schema_name'] + part_data['relispartition'] = True + part_data['name'] = row['name'] + part_data['partition_value'] = row['partition_value'] + + partition_sql += render_template("/".join( + [self.partition_template_path, 'create.sql']), 
+ data=part_data, conn=self.conn) + + # Add into main sql + partition_sql = re.sub('\n{2,}', '\n\n', partition_sql) + main_sql.append(sql_header + '\n\n' + partition_sql.strip('\n')) + + sql = '\n'.join(main_sql) + + return ajax_response(response=sql.strip('\n')) + + def reset_statistics(self, scid, tid): + """ + This function will reset statistics of table + + Args: + scid: Schema ID + tid: Table ID + """ + try: + SQL = render_template("/".join([self.table_template_path, + 'reset_stats.sql']), + tid=tid) + status, res = self.conn.execute_scalar(SQL) + if not status: + return internal_server_error(errormsg=res) + + return make_json_response( + success=1, + info=gettext("Table statistics have been reset"), + data={ + 'id': tid, + 'scid': scid + } + ) + + except Exception as e: + return internal_server_error(errormsg=str(e)) diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/templates/schema/js/schema.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/templates/schema/js/schema.js index 0e90997..e0a0fcd 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/templates/schema/js/schema.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/templates/schema/js/schema.js @@ -145,6 +145,16 @@ define('pgadmin.node.schema', [ group: gettext('Table'), mode: ['edit', 'create'], type: 'switch', disabled: function(m) { + // If table is partitioned table then disabled it. 
+ if (m.top && m.top.get('is_partitioned')) { + // We also need to unset rest of all + setTimeout(function() { + m.set('autovacuum_custom', false); + }, 10); + + return true; + } + if(!m.top.inSchema.apply(this, [m])) { return false; } diff --git a/web/pgadmin/static/js/backform.pgadmin.js b/web/pgadmin/static/js/backform.pgadmin.js index a82205e..cd8f588 100644 --- a/web/pgadmin/static/js/backform.pgadmin.js +++ b/web/pgadmin/static/js/backform.pgadmin.js @@ -1022,16 +1022,16 @@ var collection = this.model.get(data.name); - var cellEditing = function(args){ - var self = this, + var cellEditing = function(args) { + var that = this, cell = args[0]; // Search for any other rows which are open. this.each(function(m){ // Check if row which we are about to close is not current row. if (cell.model != m) { - var idx = self.indexOf(m); + var idx = that.indexOf(m); if (idx > -1) { - var row = grid.body.rows[idx], + var row = self.grid.body.rows[idx], editCell = row.$el.find(".subnode-edit-in-process").parent(); // Only close row if it's open. if (editCell.length > 0){ @@ -1053,7 +1053,7 @@ }); // Render subNode grid - subNodeGrid = grid.render().$el; + subNodeGrid = self.grid.render().$el; // Combine Edit and Delete Cell if (data.canDelete && data.canEdit) { @@ -1071,7 +1071,7 @@ data.canAddRow.apply(self, [self.model]) : true; if (canAddRow) { // Close any existing expanded row before adding new one. - _.each(grid.body.rows, function(row){ + _.each(self.grid.body.rows, function(row){ var editCell = row.$el.find(".subnode-edit-in-process").parent(); // Only close row if it's open. 
if (editCell.length > 0){ @@ -1100,7 +1100,7 @@ } } - $(grid.body.$el.find($("tr.new"))).removeClass("new") + $(self.grid.body.$el.find($("tr.new"))).removeClass("new") var m = new (data.model) (null, { silent: true, handler: collection, @@ -1111,7 +1111,7 @@ collection.add(m); var idx = collection.indexOf(m), - newRow = grid.body.rows[idx].$el; + newRow = self.grid.body.rows[idx].$el; newRow.addClass("new"); $(newRow).pgMakeVisible('backform-tab'); @@ -1149,6 +1149,7 @@ }); var SubNodeCollectionControl = Backform.SubNodeCollectionControl = Backform.Control.extend({ + row: Backgrid.Row, render: function() { var field = _.defaults(this.field.toJSON(), this.defaults), attributes = this.model.toJSON(), @@ -1179,6 +1180,12 @@ // Show Backgrid Control grid = (data.subnode == undefined) ? "" : this.showGridControl(data); + // Clean up first + this.$el.removeClass(Backform.hiddenClassname); + + if (!data.visible) + this.$el.addClass(Backform.hiddenClassname); + this.$el.html(grid).addClass(field.name); this.updateInvalid(); @@ -1246,7 +1253,9 @@ name: "pg-backform-delete", label: "", cell: Backgrid.Extension.DeleteCell, editable: false, cell_priority: -1, - canDeleteRow: data.canDeleteRow + canDeleteRow: data.canDeleteRow, + customDeleteMsg: data.customDeleteMsg, + customDeleteTitle: data.customDeleteTitle }); } @@ -1303,6 +1312,7 @@ var grid = self.grid = new Backgrid.Grid({ columns: gridSchema.columns, collection: collection, + row: this.row, className: "backgrid table-bordered" }); diff --git a/web/pgadmin/static/js/backgrid.pgadmin.js b/web/pgadmin/static/js/backgrid.pgadmin.js index 64f0cb6..d426a82 100644 --- a/web/pgadmin/static/js/backgrid.pgadmin.js +++ b/web/pgadmin/static/js/backgrid.pgadmin.js @@ -380,7 +380,8 @@ var DeleteCell = Backgrid.Extension.DeleteCell = Backgrid.Cell.extend({ defaults: _.defaults({ - defaultDeleteMsg: 'Are you sure you wish to delete this row?' 
+ defaultDeleteMsg: 'Are you sure you wish to delete this row?', + defaultDeleteTitle: 'Delete Row' }, Backgrid.Cell.prototype.defaults), /** @property */ @@ -399,8 +400,10 @@ if (canDeleteRow) { var delete_msg = !_.isUndefined(this.column.get('customDeleteMsg')) ? this.column.get('customDeleteMsg'): that.defaults.defaultDeleteMsg; + var delete_title = !_.isUndefined(this.column.get('customDeleteTitle')) ? + this.column.get('customDeleteTitle'): that.defaults.defaultDeleteTitle; Alertify.confirm( - 'Delete Row', + delete_title, delete_msg, function(evt) { that.model.collection.remove(that.model); @@ -1378,6 +1381,58 @@ _.extend(MomentCell.prototype, MomentFormatter.prototype.defaults); + + Backgrid.Extension.StringDepCell = Backgrid.StringCell.extend({ + initialize: function() { + Backgrid.StringCell.prototype.initialize.apply(this, arguments); + Backgrid.Extension.DependentCell.prototype.initialize.apply(this, arguments); + }, + dependentChanged: function () { + this.$el.empty(); + + var self = this, + model = this.model, + column = this.column, + editable = this.column.get("editable"); + + this.render(); + + is_editable = _.isFunction(editable) ? !!editable.apply(column, [model]) : !!editable; + setTimeout(function() { + self.$el.removeClass("editor"); + if (is_editable){ self.$el.addClass("editable"); } + else { self.$el.removeClass("editable"); } + }, 10); + + this.delegateEvents(); + return this; + }, + remove: Backgrid.Extension.DependentCell.prototype.remove + }); + + Backgrid.Extension.Select2DepCell = Backgrid.Extension.Select2Cell.extend({ + initialize: function() { + Backgrid.Extension.Select2Cell.prototype.initialize.apply(this, arguments); + Backgrid.Extension.DependentCell.prototype.initialize.apply(this, arguments); + }, + + dependentChanged: function () { + var model = this.model; + var column = this.column; + editable = this.column.get("editable"); + + this.render(); + + is_editable = _.isFunction(editable) ? 
!!editable.apply(column, [model]) : !!editable; + if (is_editable){ this.$el.addClass("editable"); } + else { this.$el.removeClass("editable"); } + + this.delegateEvents(); + return this; + }, + remove: Backgrid.Extension.DependentCell.prototype.remove + }); + return Backgrid; })); diff --git a/web/pgadmin/tools/backup/templates/backup/js/backup.js b/web/pgadmin/tools/backup/templates/backup/js/backup.js index 874bbeb..8e96d1b 100644 --- a/web/pgadmin/tools/backup/templates/backup/js/backup.js +++ b/web/pgadmin/tools/backup/templates/backup/js/backup.js @@ -269,7 +269,7 @@ TODO LIST FOR BACKUP: // Define list of nodes on which backup context menu option appears var backup_supported_nodes = [ - 'database', 'schema', 'table' + 'database', 'schema', 'table', 'partition' ]; /** diff --git a/web/pgadmin/tools/datagrid/__init__.py b/web/pgadmin/tools/datagrid/__init__.py index f4629fc..f8a7e4c 100644 --- a/web/pgadmin/tools/datagrid/__init__.py +++ b/web/pgadmin/tools/datagrid/__init__.py @@ -112,6 +112,10 @@ def initialize_datagrid(cmd_type, obj_type, sid, did, obj_id): return internal_server_error(errormsg=str(msg)) try: + # if object type is partition then it is nothing but a table. 
+ if obj_type == 'partition': + obj_type = 'table' + # Get the object as per the object type command_obj = ObjectRegistry.get_object(obj_type, conn_id=conn_id, sid=sid, did=did, obj_id=obj_id, cmd_type=cmd_type, diff --git a/web/pgadmin/tools/datagrid/templates/datagrid/js/datagrid.js b/web/pgadmin/tools/datagrid/templates/datagrid/js/datagrid.js index 7c48ea5..63067f0 100644 --- a/web/pgadmin/tools/datagrid/templates/datagrid/js/datagrid.js +++ b/web/pgadmin/tools/datagrid/templates/datagrid/js/datagrid.js @@ -28,7 +28,7 @@ define([ // Define list of nodes on which view data option appears var supported_nodes = [ 'table', 'view', 'mview', - 'foreign-table', 'catalog_object' + 'foreign-table', 'catalog_object', 'partition' ], /* Enable/disable View data menu in tools based diff --git a/web/pgadmin/tools/maintenance/templates/maintenance/js/maintenance.js b/web/pgadmin/tools/maintenance/templates/maintenance/js/maintenance.js index f3823f9..13b744a 100644 --- a/web/pgadmin/tools/maintenance/templates/maintenance/js/maintenance.js +++ b/web/pgadmin/tools/maintenance/templates/maintenance/js/maintenance.js @@ -147,7 +147,7 @@ define([ var maintenance_supported_nodes = [ 'database', 'table', 'primary_key', - 'unique_constraint', 'index' + 'unique_constraint', 'index', 'partition' ]; /** diff --git a/web/pgadmin/tools/restore/templates/restore/js/restore.js b/web/pgadmin/tools/restore/templates/restore/js/restore.js index f821162..2f212ac 100644 --- a/web/pgadmin/tools/restore/templates/restore/js/restore.js +++ b/web/pgadmin/tools/restore/templates/restore/js/restore.js @@ -227,7 +227,8 @@ define([ var restore_supported_nodes = [ 'database', 'schema', 'table', 'function', - 'trigger', 'index' + 'trigger', 'index', + 'partition' ]; /** diff --git a/web/pgadmin/tools/user_management/templates/user_management/js/user_management.js b/web/pgadmin/tools/user_management/templates/user_management/js/user_management.js index f11b552..68e51f5 100644 --- 
a/web/pgadmin/tools/user_management/templates/user_management/js/user_management.js +++ b/web/pgadmin/tools/user_management/templates/user_management/js/user_management.js @@ -25,34 +25,7 @@ define([ // How long to wait after typing has stopped before searching can start wait: 150 })); - }, - StringDepCell = Backgrid.StringCell.extend({ - initialize: function() { - Backgrid.StringCell.prototype.initialize.apply(this, arguments); - Backgrid.Extension.DependentCell.prototype.initialize.apply(this, arguments); - }, - dependentChanged: function () { - this.$el.empty(); - - var self = this, - model = this.model, - column = this.column, - editable = this.column.get("editable"); - - this.render(); - - is_editable = _.isFunction(editable) ? !!editable.apply(column, [model]) : !!editable; - setTimeout(function() { - self.$el.removeClass("editor"); - if (is_editable){ self.$el.addClass("editable"); } - else { self.$el.removeClass("editable"); } - }, 10); - - this.delegateEvents(); - return this; - }, - remove: Backgrid.Extension.DependentCell.prototype.remove - }); + } pgBrowser.UserManagement = { init: function() { @@ -83,7 +56,7 @@ define([ schema: [ { id: 'email', label: gettext('Email'), type: 'text', - cell:StringDepCell, cellHeaderClasses:'width_percent_30', + cell:Backgrid.Extension.StringDepCell, cellHeaderClasses:'width_percent_30', deps: ['id'], editable: function(m) { if(m instanceof Backbone.Collection) {