Re: Testing with concurrent sessions - Mailing list pgsql-hackers

From:            Markus Wanner
Subject:         Re: Testing with concurrent sessions
Date:
Msg-id:          4B4F79B3.1030303@bluegap.ch
In response to:  Re: Testing with concurrent sessions (Markus Wanner <markus@bluegap.ch>)
Responses:       Re: Testing with concurrent sessions
                 Re: Testing with concurrent sessions
List:            pgsql-hackers
Hi,

Markus Wanner wrote:
> Sorry, if that didn't get clear. I'm trying to put together something I
> can release real soon now (tm). I'll keep you informed.

Okay, here we go: dtester version 0.0.

This emerged out of Postgres-R, where I don't just need to test multiple
client connections, but multiple postmasters interacting with each other.
Nonetheless, it may be suitable for other needs as well, especially
testing with concurrent sessions.

I've decided to release this as a separate project named dtester, as
proposed by Michael Tan (thanks for the inspiration).

It's certainly missing lots of things, mainly documentation. However,
I've attached a patch which integrates nicely into the Postgres
Makefiles, so you just need to say: make dcheck.

That very same patch includes a test case with three concurrent
transactions with circular dependencies, where the current SERIALIZABLE
isolation level fails to provide serializability.

Installing dtester itself is as simple as 'python setup.py install' in
the extracted archive's directory.

Go try it, read the code, and simply ask if you get stuck. I'll try to
come up with some more documentation and such...

Regards

Markus Wanner
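In short, the intended workflow is roughly the following (the tarball and
patch file names are illustrative, and you may need to adjust the patch
level for your tree):

    $ tar xzf dtester-0.0.tar.gz
    $ cd dtester-0.0
    $ python setup.py install

    $ cd path/to/your/postgres/build/tree
    $ patch -p0 < pg_dtester.patch
    $ make dcheck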
#
# old_revision [651e6b451b14a926bbca5ac0b308bc5f979c8c8e]
#
# add_file "src/test/regress/pg_dtester.py.in"
#  content [edcf9857ee4d884b6120b72bf319774126241ee3]
#
# patch "GNUmakefile.in"
#  from [da68d137cee6cec6458a1d9e877c5b623a11415a]
#    to [b605a66cee8f77978da09c326cf9dbaeba464bd2]
#
# patch "src/test/regress/GNUmakefile"
#  from [ed08fe1be025ede31fa2dba35f81f653809a2096]
#    to [55ae74cf991ae07b0d5b082882ff5ba1f1ef4036]
#
============================================================
--- src/test/regress/pg_dtester.py.in	edcf9857ee4d884b6120b72bf319774126241ee3
+++ src/test/regress/pg_dtester.py.in	edcf9857ee4d884b6120b72bf319774126241ee3
@@ -0,0 +1,762 @@
+#!/usr/bin/python
+
+#-------------------------------------------------------------------------
+#
+# dtester.py.in
+#
+# Sample test suite running concurrent transactions, showing
+# off some capabilities of dtester.
+#
+# Copyright (c) 2006-2010, Markus Wanner
+#-------------------------------------------------------------------------
+
+import re, os, sys, getopt
+from twisted.internet import defer, reactor
+
+from dtester.events import EventMatcher, EventSource, Event, \
+    ProcessOutputEvent, ProcessErrorEvent, ProcessEndedEvent
+from dtester.exceptions import TestAborted
+from dtester.test import TestSuite, BaseTest, SyncTest
+from dtester.reporter import StreamReporter
+from dtester.runner import Runner, Timeout
+
+# ****** definition of tests and suites ***********************************
+
+class InstallationSuite(TestSuite):
+
+    setUpDescription = "creating temporary installation"
+    tearDownDescription = "removing temporary installation"
+
+    needs = (('shell', "IShell or something"),)
+
+    def setUp(self):
+        # inherit getConfig from the shell
+        setattr(self, 'getConfig', self.shell.getConfig)
+        setattr(self, 'runCommand', self.shell.runCommand)
+        setattr(self, 'recursive_remove', self.shell.recursive_remove)
+
+        # (re)create an installation directory
+        self.pg_inst_dir = self.shell.getConfig('inst_dir')
+        if os.path.exists(self.pg_inst_dir):
+            self.shell.recursive_remove(self.pg_inst_dir)
+        os.mkdir(self.pg_inst_dir)
+
+        # install into that directory
+        proc = self.shell.runCommand('make', 'make',
+            args=['make', '-C', self.shell.getConfig('top-builddir'),
+                  'DESTDIR=%s' % self.pg_inst_dir, 'install',
+                  'with_perl=no', 'with_python=no'],
+            lineBasedOutput=True)
+
+        d = self.waitFor(proc, EventMatcher(ProcessEndedEvent))
+        d.addCallback(self.makeTerminated)
+        proc.start()
+
+        # FIXME: how to properly handle these?
+        self.shell.addEnvPath(self.shell.getConfig('bindir'))
+        self.shell.addEnvLibraryPath(self.shell.getConfig('libdir'))
+        return d
+
+    def makeTerminated(self, event):
+        if event.exitCode != 0:
+            raise Exception("make install returned %d" % event.exitCode)
+        else:
+            return True
+
+    def tearDown(self):
+        # The installation procedure should be able to simply overwrite any
+        # formerly installed files, so we save the time to clean up the
+        # installation directory.
+        return
+
+
+class InitdbSuite(TestSuite):
+
+    args = (('number', int),)
+    needs = (('shell', "IShell or something"),)
+
+    def setUpDescription(self):
+        return "initializing database system %d" % self.number
+
+    def tearDownDescription(self):
+        return "removing database system %d" % self.number
+
+    def getNumber(self):
+        return self.number
+
+    def getDir(self):
+        return self.dbdir
+
+    def setUp(self):
+        self.dbdir = "%s%d" % \
+            (self.shell.getConfig('pgdata_prefix'), self.number)
+        proc = self.shell.runCommand(
+            'initdb-%d' % self.number,
+            'initdb', args=[
+                'initdb', '-D', self.dbdir,
+                '-A', 'trust', '--noclean'],
+            lineBasedOutput=True)
+
+        d = defer.Deferred()
+        proc.addHook(EventMatcher(ProcessEndedEvent),
+                     self.initdb_terminated, d)
+        proc.start()
+        return d
+
+    def initdb_terminated(self, event, d):
+        if event.exitCode != 0:
+            d.errback(Exception("initdb returned %d" % event.exitCode))
+        else:
+            d.callback(True)
+
+    def tearDown(self):
+        self.shell.recursive_remove(
+            "%s%d" % (self.shell.getConfig('pgdata_prefix'), self.number))
+
+
+class PostmasterSuite(TestSuite):
+
+    needs = (('shell', "IShell or something"),
+             ('dbdir', "IDatabaseDir"),)
+
+    def setUpDescription(self):
+        return "starting database system %d" % self.dbdir.getNumber()
+
+    def tearDownDescription(self):
+        return "stopping database system %d" % self.dbdir.getNumber()
+
+    def getPort(self):
+        return self.port
+
+    def setUp(self):
+        setattr(self, 'getNumber', self.dbdir.getNumber)
+
+        self.port = self.shell.getConfig('temp-port') + self.dbdir.getNumber()
+        self.postmaster = self.shell.runCommand(
+            'postmaster%d' % self.dbdir.getNumber(),
+            'postmaster',
+            # FIXME: -A1 doesn't exist if assertions are disabled
+            args=['postmaster', '-A1', '-d5',
+                  '-D', self.dbdir.getDir(),
+                  '-i', '-p', str(self.port)],
+            lineBasedOutput=True)
+
+        d = defer.Deferred()
+        self.readyHook = \
+            self.postmaster.addHook(EventMatcher(ProcessErrorEvent,
+                "database system is ready to accept connections"),
+                self.postmaster_ready, d)
+
+        self.unexpectedTerminationHook = \
+            self.postmaster.addHook(EventMatcher(ProcessEndedEvent),
+                                    self.postmaster_terminated)
+        self.postmaster.start()
+        return d
+
+    def postmaster_ready(self, event, d):
+        # it's sufficient if we're called once
+        self.postmaster.removeHook(self.readyHook)
+        d.callback(None)
+
+    def postmaster_terminated(self, event):
+        exitCode = 'undef'
+        if hasattr(event, 'exitCode'):
+            exitCode = event.exitCode
+        elif hasattr(event, 'data'):
+            exitCode = repr(event.data)
+        self.abort("postmaster %d unexpectedly terminated (exit code %s)" % \
+                   (self.dbdir.getNumber(), exitCode))
+
+    def tearDown(self):
+        self.postmaster.removeHook(self.unexpectedTerminationHook)
+        if not self.aborted:
+            d = defer.Deferred()
+            self.postmaster.addHook(EventMatcher(ProcessEndedEvent),
+                                    lambda event: d.callback(None))
+            self.postmaster.stop()
+            return d
+        else:
+            return True
+
+
+class TestDatabaseSuite(TestSuite):
+
+    args = (('dbname', str),)
+    needs = (('shell', "IShell or something"),
+             ('pg', "IPostmaster"),)
+
+    def setUpDescription(self):
+        return "creating database %s at server %d" % \
+            (self.dbname, self.pg.getNumber())
+
+    def tearDownDescription(self):
+        return "dropping database %s at server %d" % \
+            (self.dbname, self.pg.getNumber())
+
+    def getDbname(self):
+        return self.dbname
+
+    def setUp(self):
+        setattr(self, "getPort", self.pg.getPort)
+        setattr(self, "getNumber", self.pg.getNumber)
+
+        self.proc = self.shell.runCommand(
+            'createdb%d' % self.pg.getNumber(),
+            'createdb',
+            args=['createdb',
+                  '-p', str(self.getPort()), self.dbname],
+            lineBasedOutput=True)
+
+        d = defer.Deferred()
+        self.proc.addHook(EventMatcher(ProcessEndedEvent),
+                          self.createdb_terminated, d)
+        self.proc.start()
+        return d
+
+    def createdb_terminated(self, event, d):
+        if event.exitCode != 0:
+            d.errback(Exception("createdb terminated with code %d" % \
+                                event.exitCode))
+        else:
+            d.callback(None)
+
+    def tearDown(self):
+        if self.pg.aborted:
+            return True
+
+        # Hm.. this interferes with the postmaster suites, which need
+        # to be started and stopped several times on top of a test database,
+        # however, creating and dropping it certainly depends on a running
+        # postmaster. Not sure how to solve this, at the moment I'm just
+        # skipping cleanup, i.e. dropdb.
+        return True
+
+        self.proc = self.shell.runCommand(
+            'dropdb%d' % self.pg.getNumber(),
+            'dropdb',
+            args=['dropdb',
+                  '-p', str(self.getPort()), self.dbname],
+            lineBasedOutput=True)
+
+        d = defer.Deferred()
+        self.proc.addHook(EventMatcher(ProcessEndedEvent),
+                          self.dropdb_terminated, d)
+        self.proc.start()
+        return d
+
+    def dropdb_terminated(self, event, d):
+        if event.exitCode != 0:
+            d.errback(Exception("dropdb returned with %d" % \
+                                event.exitCode))
+        else:
+            d.callback(None)
+
+
+class SqlConnectionSuite(TestSuite):
+
+    args = (('dbname', str),)
+    needs = (('shell', "IShell or something"),
+             ('db', "IPostmaster"))
+
+    def setUpDescription(self):
+        return "connecting to database %s at server %d" % \
+            (self.dbname, self.db.getNumber())
+
+    def tearDownDescription(self):
+        return "disconnecting from database %s at server %d" % \
+            (self.dbname, self.db.getNumber())
+
+    def getDbname(self):
+        return self.dbname
+
+    def setUp(self):
+        self.psql = self.shell.runCommand(
+            'psql%d' % self.db.getNumber(),
+            'psql',
+            args=['psql', '-A',
+                  '-p', str(self.db.getPort()), self.dbname])
+
+        # initialize the output buffer and attach a first output collector
+        # *before* the process is started.
+        self.output_buffer = ""
+        d = defer.Deferred()
+        self.outputCollectorDeferred = d
+        self.outputCollectorHook = self.psql.addHook(
+            EventMatcher(ProcessOutputEvent), self.outputCollector,
+            None, d)
+
+        # mark as being in use, until we get to the command line
+        self.inUse = True
+        self.workQueue = []
+
+        # also add a termination hook
+        self.unexpectedTerminationHook = self.psql.addHook(
+            EventMatcher(ProcessEndedEvent), self.psql_terminated)
+
+        # then schedule start of the psql process and return the deferred
+        # *before* starting the process.
+        reactor.callLater(0.0, self.psql.start)
+        return d
+
+    def psql_terminated(self, event):
+        exitCode = "undef"
+        if hasattr(event, 'exitCode'):
+            exitCode = event.exitCode
+        elif hasattr(event, 'data'):
+            exitCode = repr(event.data)
+
+        # If there's still an outputCollectorHook, a plain abort won't be
+        # noticed until the timeout triggers, instead of acting on process
+        # termination. We thus save the outputCollector deferred and send
+        # it an errback with the failure.
+        if self.outputCollectorHook:
+            self.outputCollectorDeferred.errback( \
+                TestAborted("psql to server %d unexpectedly terminated (exit code %s)" % ( \
+                    self.db.getNumber(), exitCode)))
+        self.abort(
+            "psql to server %d unexpectedly terminated (exit code %s)" % ( \
+                self.db.getNumber(), exitCode))
+
+    def tearDown(self):
+        self.psql.removeHook(self.unexpectedTerminationHook)
+
+        d = defer.Deferred()
+        self.psql.addHook(EventMatcher(ProcessEndedEvent),
+                          lambda event: d.callback(None))
+        reactor.callLater(0.0, self.psql.write, "\\q\n")
+        reactor.callLater(5.0, self.psql.stop)
+        return d
+
+    def outputCollector(self, event, query, d):
+        self.output_buffer += event.data
+
+        cmdprompt = self.dbname + '=#'
+        cpos = self.output_buffer.find(cmdprompt)
+
+        if cpos >= 0:
+            self.psql.removeHook(self.outputCollectorHook)
+            self.outputCollectorHook = False
+            result = self.output_buffer[:cpos]
+            self.output_buffer = self.output_buffer[cpos + len(cmdprompt):]
+            if len(self.output_buffer) > 0 and self.output_buffer != ' ':
+                print "rest: %s" % repr(self.output_buffer)
+            if d:
+                # (the trailing command prompt has already been cut off
+                # above, when slicing the output buffer)
+                if query:
+                    # remove the echoed query string at the beginning
+                    query_len = len(query)
+                    if result[:query_len] == query:
+                        result = result[query_len:]
+                    while (len(result) > 1) and (result[0] in ("\n", "\r")):
+                        result = result[1:]
+                reactor.callLater(0.0, d.callback, result)
+
+            self.inUse = False
+            if len(self.workQueue) > 0:
+                assert not self.inUse
+                job = self.workQueue.pop()
+                d1 = job['method'](*job['args'])
+                d1.chainDeferred(job['deferred'])
+
+    def query(self, query):
+        if self.inUse:
+            d = defer.Deferred()
+            self.workQueue.append({'deferred': d,
+                                   'method': self.query,
+                                   'args': (query,)})
+            return d
+
+        assert not self.inUse
+        assert not self.outputCollectorHook
+
+        self.inUse = True
+        self.output_buffer = ""
+        d = defer.Deferred()
+        self.outputCollectorHook = self.psql.addHook(
+            EventMatcher(ProcessOutputEvent), self.outputCollector, query, d)
+        d.addCallback(self.parseQueryResult)
+
+        # defer writing to the process, so that the caller has the
+        # opportunity to add callbacks to the deferred we return.
+        reactor.callLater(0.0, self.psql.write, query + "\n")
+
+        return d
+
+    def parseQueryResult(self, result):
+        lines = result.split('\n')
+        # strip empty lines at the end
+        while len(lines[-1].strip()) == 0:
+            lines = lines[:-1]
+        #print "lines: %s" % lines
+
+        try:
+            assert len(lines) >= 2
+
+            lines = map(lambda x: x.strip(), lines)
+            headLine = lines[0]
+            tailLine = lines[-1]
+
+            fields = headLine.split('|')
+            rows = []
+            for row in lines[1:-1]:
+                attrs = row.split('|')
+                assert len(attrs) == len(fields)
+                x = {}
+                for i in range(len(attrs)):
+                    x[fields[i]] = attrs[i].strip()
+                rows.append(x)
+
+            x = re.compile("\((\d+) rows?\)").search(tailLine)
+            if x:
+                if not int(x.group(1)) == len(rows):
+                    print "number of rows doesn't match: %s vs %d" % (
+                        x.group(1), len(rows))
+                    print "for: %s" % lines
+            else:
+                raise Exception("final number of rows line doesn't match.\n------------\n%s\n---------------\n" % lines)
+            return rows
+        except Exception, e:
+            import traceback
+            print "error parsing query result: %s" % e
+            traceback.print_exc()
+            raise e
+        # return []
+
+    def operation(self, query, expResult):
+        if self.inUse:
+            d = defer.Deferred()
+            self.workQueue.append({'deferred': d,
+                                   'method': self.operation,
+                                   'args': (query, expResult)})
+            return d
+
+        assert not self.inUse
+        assert not self.outputCollectorHook
+
+        self.inUse = True
+        self.output_buffer = ""
+        d = defer.Deferred()
+        self.outputCollectorDeferred = d
+        self.outputCollectorHook = self.psql.addHook(
+            EventMatcher(ProcessOutputEvent), self.outputCollector, query, d)
+        d.addCallback(self.checkQueryResult, expResult)
+
+        # defer writing to the process, so that the caller has the
+        # opportunity to add callbacks to the deferred we return.
+        reactor.callLater(0.0, self.psql.write, query + "\n")
+
+        return d
+
+    def checkQueryResult(self, result, expResult):
+        x = re.compile("^" + expResult, re.M).search(result)
+        if not x:
+            print "result:\n---------\n%s\n----------\n" % result
+            print "expResult:\n---------\n%s\n----------\n" % expResult
+            print "WRONG RESULT!"
+            raise Exception("didn't get expected result")
+        return result
+
+
+class TestDatabaseConnection(BaseTest):
+
+    needs = (('conn', "ISqlConnection"),)
+
+    description = "database connection"
+
+    def run(self):
+        return self.conn.query("SELECT 1 AS test;")
+
+
+# FIXME: that's not actually a test, but it modifies the database state
+class PopulateTestDatabase(BaseTest):
+
+    needs = (('conn', "ISqlConnection"),)
+
+    description = "populate test database"
+
+    def run(self):
+        conn = self.conn
+
+        # Create a test table for use in the serializability tests and
+        # fill it with three test tuples.
+        d = conn.operation("CREATE TABLE test (i int PRIMARY KEY, t text);",
+                           "CREATE TABLE")
+        d.addCallback(lambda x: conn.operation(
+            "INSERT INTO test VALUES (5, 'apple');",
+            "INSERT"))
+        d.addCallback(lambda x: conn.operation(
+            "INSERT INTO test VALUES (7, 'pear');",
+            "INSERT"))
+        d.addCallback(lambda x: conn.operation(
+            "INSERT INTO test VALUES (11, 'banana');",
+            "INSERT"))
+        return d
+
+
+class TestTrueSerializabilityConcurrentUpdates(SyncTest):
+    """ Runs three transactions concurrently, each reading from what the
+        other writes in turn. Should raise a serialization failure, but
+        instead leads to wrong results, ATM.
+ """ + + description = "concurrent updates" + + needs = (('conn1', 'ISqlConnection'), + ('conn2', 'ISqlConnection'), + ('conn3', 'ISqlConnection')) + + def execOnAllConnections(self, sql, expRes): + deferreds = [] + for conn in self.connections: + d = conn.operation(sql, expRes) + deferreds.append(d) + + d = defer.DeferredList(deferreds, + consumeErrors=True, fireOnOneErrback=True) + return d + + def readValueThenWrite(self, conn, readFromId, writeToId): + d = conn.query("SELECT t FROM test WHERE i = %d;" % readFromId) + d.addCallback(self.writeValueBack, conn, writeToId) + return d + + def writeValueBack(self, result, conn, writeToId): + assert len(result) == 1 + row = result[0] + assert len(row) == 1 + value = row['t'] + d = conn.operation("UPDATE test SET t = '%s' WHERE i = %d;" % (value, writeToId), + "UPDATE") + return d + + def startConcurrentOperations(self): + d1 = self.readValueThenWrite(self.conn1, readFromId=5, writeToId=7) + d2 = self.readValueThenWrite(self.conn2, readFromId=7, writeToId=11) + d3 = self.readValueThenWrite(self.conn3, readFromId=11, writeToId=5) + return defer.DeferredList([d1, d2, d3], + consumeErrors=False, fireOnOneErrback=True) + + def run(self): + self.connections = [ + self.conn1, + self.conn2, + self.conn3] + + # begin a transaction on all three connections + self.syncCall(10, self.execOnAllConnections, + "BEGIN;", "BEGIN") + + # set their isolation level to SERIALIZABLE + self.syncCall(10, self.execOnAllConnections, + "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;", "SET") + + # concurrently let each of the three transactions read a value and + # write that to another tuple, wait for all the UPDATEs to complete + # before trying to commit any of the transactions + self.syncCall(10, self.startConcurrentOperations) + + # try to commit all three transactions (accepting both COMMIT or + # ERROR, we check the result later on). + self.syncCall(10, self.execOnAllConnections, + "COMMIT;", "COMMIT|ERROR"); + + # count the occurrance of each fruit + result = self.syncCall(10, self.conn1.query, + "SELECT t FROM test WHERE i IN (5, 7, 11);") + counters = {'banana': 0, 'apple': 0, 'pear': 0} + for row in result: + counters[row['t']] += 1 + + # you currently get one fruit each, as no transaction gets aborted, + # which is impossible if the transactions had been executed one + # after another. + self.assertNotEqual(counters.values(), [1, 1, 1]) + +class TestTrueSerializabilityConcurrentInsert(SyncTest): + """ Runs three transactions concurrently, each reading from what the + other writes in turn. Should raise a serialization failure, but + instead leads to wrong results, ATM. + """ + + description = "concurrent insert" + + needs = (('conn1', 'ISqlConnection'), + ('conn2', 'ISqlConnection')) + + def execOnAllConnections(self, sql, expRes): + deferreds = [] + for conn in self.connections: + d = conn.operation(sql, expRes) + deferreds.append(d) + + d = defer.DeferredList(deferreds, + consumeErrors=True, fireOnOneErrback=True) + return d + + def run(self): + self.connections = [ + self.conn1, + self.conn2] + + # begin a transaction on all three connections + self.syncCall(10, self.execOnAllConnections, + "BEGIN;", "BEGIN") + + # set their isolation level to SERIALIZABLE + self.syncCall(10, self.execOnAllConnections, + "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;", "SET") + + # try to commit all three transactions (accepting both COMMIT or + # ERROR, we check the result later on). 
+        self.syncCall(10, self.execOnAllConnections,
+                      "COMMIT;", "COMMIT|ERROR")
+
+
+# ****** test running code ************************************************
+
+class Logger(object):
+    """ A simplistic logger that just writes it all into one single file.
+    """
+    def __init__(self, logFileName):
+        self.logfile = open(logFileName, 'w')
+
+    def __del__(self):
+        self.logfile.close()
+
+    def callback(self, event):
+        self.logfile.write(str(event) + "\n")
+        self.logfile.flush()
+
+
+def usage():
+    # bare-bones usage summary, matching the options parsed in main()
+    print "usage: pg_dtester.py [-h|--help] [--temp-install]"
+    print "                     [--top-builddir=DIR] [--temp-port=PORT]"
+    print "                     [--multibyte=ENCODING]"
+
+
+def main(argv):
+    print "Postgres dtester suite Copyright (c) 2004-2010, by Markus Wanner\n"
+
+    config = {
+        'temp-port': 65432,
+
+        # by default, use the same installation directory as make check
+        'inst_dir': os.path.join(os.getcwd(), 'tmp_check/install'),
+
+        # and a similar prefix
+        'pgdata_prefix': os.path.join(os.getcwd(), 'tmp_check/data-dtester'),
+        'logfile': os.path.join(os.getcwd(), 'dtester.log'),
+    }
+
+    try:
+        opts, args = getopt.getopt(argv,
+            "h",
+            ["help", "temp-install", "top-builddir=", "temp-port=",
+             "multibyte="])
+    except getopt.GetoptError:
+        usage()
+        sys.exit(2)
+
+    for opt, arg in opts:
+        if opt in ("-h", "--help"):
+            usage()
+            sys.exit()
+        elif opt in ("--temp-install",):
+            config["temp-install"] = True
+        elif opt in ("--temp-port",):
+            try:
+                arg = int(arg)
+                if arg >= 1024 and arg <= 65535:
+                    config["temp-port"] = arg
+                else:
+                    print "temp-port out of range."
+                    sys.exit(2)
+            except ValueError:
+                print "Fatal: invalid temp-port specified"
+                sys.exit(2)
+        elif opt in ("--top-builddir",):
+            config["top-builddir"] = arg
+
+    if not config.has_key('bindir'):
+        bindir = '@bindir@'
+        if bindir[0] == '/':
+            bindir = bindir[1:]
+        config['bindir'] = os.path.join(config['inst_dir'], bindir)
+    if not config.has_key('libdir'):
+        libdir = '@libdir@'
+        if libdir[0] == '/':
+            libdir = libdir[1:]
+        config['libdir'] = os.path.join(config['inst_dir'], libdir)
+    if not config.has_key('datadir'):
+        datadir = '@datadir@'
+        if datadir[0] == '/':
+            datadir = datadir[1:]
+        config['datadir'] = os.path.join(config['inst_dir'], datadir)
+
+    # FIXME: should not have to be here
+    logger = Logger(config['logfile'])
+    config['main_logging_hook'] = (EventMatcher(Event), logger.callback)
+
+    # definition of tests and suites, including their dependencies
+    tdef = {
+        # runs 'make install' to make sure the installation is up to date
+        'temp_install': {'class': InstallationSuite,
+                         'uses': ('__system__',)},
+
+        # runs initdb, providing the Postgres data directory
+        'initdb-0': {'class': InitdbSuite,
+                     'uses': ('temp_install',),
+                     'args': (0,)},
+
+        # runs a postmaster on the created database directory
+        'pg-0': {'class': PostmasterSuite,
+                 'uses': ('temp_install', 'initdb-0')},
+
+        # creates a test database on pg-0
+        'testdb': {'class': TestDatabaseSuite,
+                   'uses': ('temp_install', 'pg-0'),
+                   'args': ('testdb',)},
+
+        # open three connections
+        'conn-0A': {'class': SqlConnectionSuite,
+                    'uses': ('temp_install', 'pg-0'),
+                    'args': ('testdb',),
+                    'depends': ('testdb',)},
+        'conn-0B': {'class': SqlConnectionSuite,
+                    'uses': ('temp_install', 'pg-0'),
+                    'args': ('testdb',),
+                    'depends': ('testdb',)},
+        'conn-0C': {'class': SqlConnectionSuite,
+                    'uses': ('temp_install', 'pg-0'),
+                    'args': ('testdb',),
+                    'depends': ('testdb',)},
+
+        # test the connections
+        'test-conn-0A': {'class': TestDatabaseConnection,
+                         'uses': ('conn-0A',)},
+        'test-conn-0B': {'class': TestDatabaseConnection,
+                         'uses': ('conn-0B',)},
+        'test-conn-0C': {'class': TestDatabaseConnection,
+                         'uses': ('conn-0C',)},
+
+        # populate the test database
+        'populate-testdb': {'class': PopulateTestDatabase,
+                            'uses': ('conn-0A',),
+                            'onlyAfter': ('test-conn-0A', 'test-conn-0B',
+                                          'test-conn-0C')},
+
+        'ser-updates': {'class': TestTrueSerializabilityConcurrentUpdates,
+                        'uses': ('conn-0A', 'conn-0B', 'conn-0C'),
+                        'onlyAfter': ('populate-testdb',)},
+
+        'ser-insert': {'class': TestTrueSerializabilityConcurrentInsert,
+                       'uses': ('conn-0A', 'conn-0B'),
+                       'onlyAfter': ('populate-testdb',)},
+    }
+
+    reporter = StreamReporter()
+    runner = Runner(reporter, testTimeout=60, suiteTimeout=180)
+    runner.run(tdef, config)
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
============================================================
--- GNUmakefile.in	da68d137cee6cec6458a1d9e877c5b623a11415a
+++ GNUmakefile.in	b605a66cee8f77978da09c326cf9dbaeba464bd2
@@ -57,7 +57,7 @@ check: all
 check: all
 
-check installcheck installcheck-parallel:
+check dcheck installcheck installcheck-parallel:
 	$(MAKE) -C src/test $@
 
 GNUmakefile: GNUmakefile.in $(top_builddir)/config.status
============================================================
--- src/test/regress/GNUmakefile	ed08fe1be025ede31fa2dba35f81f653809a2096
+++ src/test/regress/GNUmakefile	55ae74cf991ae07b0d5b082882ff5ba1f1ef4036
@@ -135,6 +135,23 @@ tablespace-setup:
 ##
+## Prepare for dtester tests
+##
+pg_dtester.py: pg_dtester.py.in GNUmakefile $(top_builddir)/src/Makefile.global
+	sed -e 's,@bindir@,$(bindir),g' \
+	    -e 's,@libdir@,$(libdir),g' \
+	    -e 's,@pkglibdir@,$(pkglibdir),g' \
+	    -e 's,@datadir@,$(datadir),g' \
+	    -e 's/@VERSION@/$(VERSION)/g' \
+	    -e 's/@host_tuple@/$(host_tuple)/g' \
+	    -e 's,@GMAKE@,$(MAKE),g' \
+	    -e 's/@enable_shared@/$(enable_shared)/g' \
+	    -e 's/@GCC@/$(GCC)/g' \
+	    $< >$@
+	chmod a+x $@
+
+
+##
 ## Run tests
 ##
 
@@ -152,6 +169,11 @@ standbycheck: all
 standbycheck: all
 	$(pg_regress_call) --psqldir=$(PSQLDIR) --schedule=$(srcdir)/standby_schedule --use-existing
 
+dcheck: pg_dtester.py
+	./pg_dtester.py --temp-install --top-builddir=$(top_builddir) \
+		--multibyte=$(MULTIBYTE) $(MAXCONNOPT) $(NOLOCALE)
+
+
 # old interfaces follow...
 
 runcheck: check
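For those who want to experiment with additional tests: a test is simply
a class providing a 'needs' declaration, a description and a run() method
returning a deferred; it gets hooked into the suite via an entry in the
tdef dictionary of main(). Here's a minimal sketch, based solely on the
interfaces the sample suite above uses (the class name TestRowCount and
the tdef key 'test-rowcount' are made up):

    class TestRowCount(BaseTest):
        """ Checks that the test table contains exactly the three rows
            that PopulateTestDatabase inserted.
        """
        needs = (('conn', "ISqlConnection"),)
        description = "row count check"

        def run(self):
            d = self.conn.query("SELECT i, t FROM test;")
            d.addCallback(self.checkResult)
            return d

        def checkResult(self, rows):
            # conn.query returns a list of dicts, keyed by column name
            if len(rows) != 3:
                raise Exception("expected 3 rows, got %d" % len(rows))
            return rows

    # ... and in the tdef dictionary of main():
    'test-rowcount': {'class': TestRowCount,
                      'uses': ('conn-0A',),
                      'onlyAfter': ('populate-testdb',)},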