From 192d9d6fbaa3f5a88c3a3b3d4dafc9238edc1005 Mon Sep 17 00:00:00 2001 From: Christopher Browne Date: Fri, 28 Jan 2011 13:20:40 -0500 Subject: [PATCH 01/11] Start test by dropping databases, if they're still there from some previous run --- clustertest/run_all_disorder_tests.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/clustertest/run_all_disorder_tests.sh b/clustertest/run_all_disorder_tests.sh index b67d80f6..a955eee2 100755 --- a/clustertest/run_all_disorder_tests.sh +++ b/clustertest/run_all_disorder_tests.sh @@ -19,4 +19,16 @@ else exit 1 fi +echo "Start test by dropping databases based on config in ${DBP}" +for db in db1 db2 db3 db4 db5; do + PGTESTHOST=`grep "database.${db}.host=" ${DBP} | cut -d = -f 2` + PGTESTPORT=`grep "database.${db}.port=" ${DBP} | cut -d = -f 2` + PGTESTUSER=`grep "database.${db}.user.slony=" ${DBP} | cut -d = -f 2` + PGTESTDATABASE=`grep "database.${db}.dbname=" ${DBP} | cut -d = -f 2` + PGTESTPATH=`grep "database.${db}.pgsql.path=" ${DBP} | cut -d = -f 2` + #PGTESTPASSWORD=`grep "database.${db}.password=" ${DBP} | cut -d = -f 2` + echo "Dropping database: ${PGTESTPATH}/dropdb -h ${PGTESTHOST} -p ${PGTESTPORT} -U ${PGTESTUSER} ${PGTESTDATABASE}" + ${PGTESTPATH}/psql -h ${PGTESTHOST} -p ${PGTESTPORT} -U ${PGTESTUSER} -d template1 -c "drop database if exists ${PGTESTDATABASE};" +done + java -jar ${CLUSTERTESTHOME}/build/jar/clustertest-coordinator.jar ${DBP} disorder/tests/disorder_tests.js From b2248daef15ddc480598592808efcc4a3e8abea9 Mon Sep 17 00:00:00 2001 From: Christopher Browne Date: Fri, 28 Jan 2011 17:13:58 -0500 Subject: [PATCH 02/11] Add extra slonik echo commands to indicate a 'backlink' to the disorder test function which generated this bit of slonik code. 
--- .../disorder/tests/AddPathsAfterSubscribe.js | 4 +-- clustertest/disorder/tests/BasicTest.js | 26 +++++++++---------- clustertest/disorder/tests/DropPath.js | 3 ++- clustertest/disorder/tests/DropSet.js | 3 ++- clustertest/disorder/tests/EmptySet.js | 3 ++- clustertest/disorder/tests/ExecuteScript.js | 18 ++++++++----- clustertest/disorder/tests/FailNodeTest.js | 6 +++-- clustertest/disorder/tests/Failover.js | 14 +++++----- clustertest/disorder/tests/InitialCopyFail.js | 3 ++- clustertest/disorder/tests/LongTransaction.js | 3 ++- clustertest/disorder/tests/OmitCopy.js | 3 ++- clustertest/disorder/tests/RenameTests.js | 5 ++-- clustertest/disorder/tests/Unsubscribe.js | 3 ++- .../disorder/tests/UnsubscribeBeforeEnable.js | 1 - 14 files changed, 55 insertions(+), 40 deletions(-) diff --git a/clustertest/disorder/tests/AddPathsAfterSubscribe.js b/clustertest/disorder/tests/AddPathsAfterSubscribe.js index e12a3be5..11b0b094 100644 --- a/clustertest/disorder/tests/AddPathsAfterSubscribe.js +++ b/clustertest/disorder/tests/AddPathsAfterSubscribe.js @@ -32,7 +32,7 @@ AddPathsAfterSubscribe.prototype.runTest = function() { //Now add the Paths. 
var slonikPreamble = this.getSlonikPreamble(); - var slonikScript=''; + var slonikScript='echo \'AddPathsAfterSubscribe.prototype.runTest\';\n'; for(var serverId=1; serverId <= this.getNodeCount(); serverId++) { for(var clientId=1; clientId <= this.getNodeCount(); clientId++) { if(serverId!=clientId) { @@ -60,7 +60,7 @@ AddPathsAfterSubscribe.prototype.setupReplication = function() { var result = 0; var slonikPre = this.getSlonikPreamble(); - var slonikScript = ''; + var slonikScript='echo \'AddPathsAfterSubscribe.prototype.setupReplication\';\n'; // slonikScript += 'sleep(seconds=60);' slonikScript += 'init cluster(id=1);\n'; diff --git a/clustertest/disorder/tests/BasicTest.js b/clustertest/disorder/tests/BasicTest.js index e9f7a3c0..cbf49b62 100644 --- a/clustertest/disorder/tests/BasicTest.js +++ b/clustertest/disorder/tests/BasicTest.js @@ -74,7 +74,7 @@ BasicTest.prototype.setupReplication = function() { var result = 0; var slonikPre = this.getSlonikPreamble(); - var slonikScript = ''; + var slonikScript = 'echo \'BasicTest.prototype.setupReplication\';\n'; for ( var idx = 1; idx <= this.getNodeCount(); idx++) { slonikScript += 'try {\n'; @@ -157,7 +157,7 @@ BasicTest.prototype.setupReplication = function() { */ BasicTest.prototype.addCompletePaths = function() { var slonikPre = this.getSlonikPreamble(); - var slonikScript = ''; + var slonikScript = 'echo \'BasicTest.prototype.addCompletePaths\';\n'; slonikScript += 'store path(server=1,client=4,conninfo=@CONNINFO1 );\n'; //slonikScript += 'wait for event(origin=4,confirmed=all,wait on=4);\n'; slonikScript += 'store path(server=4,client=1,conninfo=@CONNINFO4 );\n'; @@ -186,7 +186,7 @@ BasicTest.prototype.addCompletePaths = function() { } BasicTest.prototype.getAddTableSlonikScript=function() { - var slonikScript=''; + var slonikScript = 'echo \'BasicTest.prototype.getAddTableSlonikScript\';\n'; slonikScript += ' set add table(id=1, set id=1, fully qualified name=\'disorder.do_customer\',origin=1);\n'; 
slonikScript += ' set add sequence(id=1, set id=1, fully qualified name=\'disorder.do_customer_c_id_seq\',origin=1);\n'; @@ -202,10 +202,7 @@ BasicTest.prototype.getAddTableSlonikScript=function() { slonikScript += ' set add table(id=6, set id=1, fully qualified name=\'disorder.do_order_line\',origin=1);\n'; - slonikScript += ' set add table(id=7, set id=1, fully qualified name=\'disorder.do_config\',origin=1);\n'; - - this.tableIdCounter=8; this.sequenceIdCounter=7; return slonikScript; @@ -215,7 +212,7 @@ BasicTest.prototype.addTables = function() { var result = 0; var slonikPre = this.getSlonikPreamble(); - var slonikScript = ''; + var slonikScript = 'echo \'BasicTest.prototype.addTables\';\n'; slonikScript=this.getAddTableSlonikScript(); var thisRef = this; var slonik = this.coordinator.createSlonik('init', slonikPre, slonikScript); @@ -375,7 +372,7 @@ BasicTest.prototype.getSyncWaitTime = function() { */ BasicTest.prototype.slonikSync = function(setid, originid) { var slonikPre = this.getSlonikPreamble(); - var slonikScript = ''; + var slonikScript = 'echo \'BasicTest.prototype.slonikSync\';\n'; slonikScript += ' sync(id=' + originid + ');\n'; slonikScript += ' wait for event(origin=' + originid + ', wait on=' + originid + ',confirmed=all,timeout=' + this.getSyncWaitTime() +');\n'; @@ -435,7 +432,8 @@ BasicTest.prototype.slonikSync = function(setid, originid) { BasicTest.prototype.moveSet = function(setid, origin_node, destination_node) { var preamble = this.getSlonikPreamble(); - var slonikScript = 'lock set(id=' + setid + ',origin=' + origin_node + var slonikScript = 'echo \'BasicTest.prototype.moveSet\';\n'; + slonikScript += 'lock set(id=' + setid + ',origin=' + origin_node + ');\n' + 'move set(id=' + setid + ',old origin=' + origin_node + ', new origin=' + destination_node + ');\n' + 'wait for event(wait on=' + origin_node + ', origin=' @@ -464,7 +462,7 @@ BasicTest.prototype.moveSet = function(setid, origin_node, destination_node) { 
BasicTest.prototype.subscribeSetBackground = function(setid, origin_node, provider_node, subscriber_nodes) { - var slonikScript = ''; + var slonikScript = 'echo \'BasicTest.prototype.subscribeSetBackground\';\n'; var preamble = this.getSlonikPreamble(); var slonikList = []; @@ -534,7 +532,7 @@ BasicTest.prototype.subscribeSet = function(set_id, origin_node,provider_node, */ BasicTest.prototype.teardownSlony = function() { var slonikPre = this.getSlonikPreamble(); - var slonikScript = ''; + var slonikScript = 'echo \'BasicTest.prototype.teardownSlony\';\n'; for ( var idx = 1; idx <= this.getNodeCount(); idx++) { slonikScript += 'try {\n'; @@ -641,7 +639,8 @@ BasicTest.prototype.getClusterName = function () { */ BasicTest.prototype.createSecondSet=function(origin) { var slonikPreamble = this.getSlonikPreamble(); - var slonikScript = 'create set(id=2, origin=' + origin + ',comment=\'second set\');\n' + var slonikScript = 'echo \'BasicTest.prototype.createSecondSet\';\n'; + slonikScript += 'create set(id=2, origin=' + origin + ',comment=\'second set\');\n' + 'set add table(set id=2,origin=' + origin + ',id=' + this.tableIdCounter +', fully qualified name=\'disorder.do_item_review\');\n' + 'set add sequence(set id=2, origin=' + origin + ', id=' + this.sequenceIdCounter @@ -740,7 +739,8 @@ BasicTest.prototype.getCurrentOrigin=function() { * */ BasicTest.prototype.generateSlonikWait=function(event_node) { - var slonikScript = ' wait for event(origin=' + event_node + ', wait on=' + var slonikScript = 'echo \'BasicTest.prototype.generateSlonikWait\';\n'; + slonikScript += ' wait for event(origin=' + event_node + ', wait on=' + event_node + ',confirmed=all);\n'; return slonikScript; } diff --git a/clustertest/disorder/tests/DropPath.js b/clustertest/disorder/tests/DropPath.js index 55d39ffd..b52b4d3a 100644 --- a/clustertest/disorder/tests/DropPath.js +++ b/clustertest/disorder/tests/DropPath.js @@ -74,7 +74,8 @@ DropPath.prototype.runTest = function() { 
DropPath.prototype.dropPath=function(server_id, client_id,event_node,expectFailure) { this.coordinator.log('dropPath ' + server_id + ',' + client_id + ',' + event_node); var slonikPreamble = this.getSlonikPreamble(); - var slonikScript = 'drop path(server=' + server_id + ',client = ' + client_id + var slonikScript='echo \'DropPath.prototype.dropPath\';\n'; + slonikScript += 'drop path(server=' + server_id + ',client = ' + client_id + ',event node=' + event_node + ');\n' + 'wait for event(origin=' + event_node + ',wait on=' + event_node + ',confirmed=all);\n'; diff --git a/clustertest/disorder/tests/DropSet.js b/clustertest/disorder/tests/DropSet.js index e7eb52d6..57871008 100644 --- a/clustertest/disorder/tests/DropSet.js +++ b/clustertest/disorder/tests/DropSet.js @@ -67,7 +67,8 @@ DropSet.prototype.testDropConcurrentSubscribe=function() { this.subscribeSet(2,1,1,[3]); var subscribeArray = this.subscribeSetBackground(2,1,3,[4] ); var slonikPreamble = this.getSlonikPreamble(); - var slonikScript = 'drop set (id=2, origin=1);\n' + var slonikScript='echo \'DropSet.prototype.testDropConcurrentSubscribe\';\n'; + slonikScript += 'drop set (id=2, origin=1);\n' + 'wait for event(origin=1, confirmed=3,wait on=1);\n'; var slonik=this.coordinator.createSlonik('drop set 2',slonikPreamble,slonikScript); diff --git a/clustertest/disorder/tests/EmptySet.js b/clustertest/disorder/tests/EmptySet.js index b9db8289..94ef62d8 100644 --- a/clustertest/disorder/tests/EmptySet.js +++ b/clustertest/disorder/tests/EmptySet.js @@ -38,7 +38,8 @@ EmptySet.prototype.runTest = function() { * Create a second set */ var slonikPreamble = this.getSlonikPreamble(); - var slonikScript = 'create set(id=2, origin=1);\n'; + var slonikScript = 'echo \'EmptySet.prototype.runTest\';\n'; + slonikScript += 'create set(id=2, origin=1);\n'; slonik = this.coordinator.createSlonik('create set 2',slonikPreamble,slonikScript); slonik.run(); this.coordinator.join(slonik); diff --git 
a/clustertest/disorder/tests/ExecuteScript.js b/clustertest/disorder/tests/ExecuteScript.js index 56a5fa4a..cd034c86 100644 --- a/clustertest/disorder/tests/ExecuteScript.js +++ b/clustertest/disorder/tests/ExecuteScript.js @@ -176,7 +176,9 @@ ExecuteScript.prototype.testAddDropColumn = function(setid, eventNode, fileWriter .write('ALTER TABLE disorder.do_order ADD COLUMN testcolumn int4 default 7;'); fileWriter.close(); - var slonikScript = 'EXECUTE SCRIPT( SET ID=' + setid + ', FILENAME=\'' + var slonikScript = 'echo \'ExecuteScript.prototype.testAddDropColumn\';\n'; + + slonikScript += 'EXECUTE SCRIPT( SET ID=' + setid + ', FILENAME=\'' + scriptFile.getAbsolutePath() + '\' , EVENT NODE=' + eventNode + ' );\n' + 'SYNC(id=' + eventNode + ');\n' + 'wait for event(origin=' + eventNode @@ -265,7 +267,8 @@ ExecuteScript.prototype.createAndReplicateTestTable=function() { fileWriter .write('CREATE TABLE disorder.test_transient(id serial,data text, primary key(id));'); fileWriter.close(); - var slonikScript = 'EXECUTE SCRIPT( SET ID=1' + ', FILENAME=\'' + var slonikScript = 'echo \'ExecuteScript.prototype.createAndReplicateTestTable\';\n'; + slonikScript += 'EXECUTE SCRIPT( SET ID=1' + ', FILENAME=\'' + scriptFile.getAbsolutePath() + '\' , EVENT NODE=1 );\n' + 'SYNC(id=1);\n' + 'wait for event(origin=1, confirmed=all, wait on=1);\n' @@ -351,7 +354,8 @@ ExecuteScript.prototype.testDDLFailure = function() { /** * First execute the script against node 2, it should fail. 
*/ - var slonikScript = 'EXECUTE SCRIPT( SET ID=2' + ', FILENAME=\'' + var slonikScript = 'echo \'ExecuteScript.prototype.testDDLFailure\';\n'; + slonikScript += 'EXECUTE SCRIPT( SET ID=2' + ', FILENAME=\'' + scriptFile.getAbsolutePath() + '\' , EVENT NODE=2 );\n'; var slonikPreamble = this.getSlonikPreamble(); var slonik = this.coordinator.createSlonik('slonik, ddlFailure',slonikPreamble,slonikScript); @@ -405,7 +409,7 @@ ExecuteScript.prototype.dropTestTable=function(node_id,set_id,removeFromReplicat var fileWriter = new java.io.FileWriter(scriptFile_drop); fileWriter.write('DROP TABLE disorder.test_transient;'); fileWriter.close(); - var slonikScript =''; + var slonikScript ='echo \'ExecuteScript.prototype.dropTestTable\';\n'; if(removeFromReplication) { slonikScript+='set drop table(id=' + (this.tableIdCounter-1) + ',origin=' + node_id + ');\n'; } @@ -423,7 +427,8 @@ ExecuteScript.prototype.dropTestTable=function(node_id,set_id,removeFromReplicat } ExecuteScript.prototype.dropSet3 = function(set_origin) { var slonikPreamble = this.getSlonikPreamble(); - var slonikScript = 'drop set (id=3,origin=' + set_origin + ');' + var slonikScript ='echo \'ExecuteScript.prototype.dropSet3\';\n'; + slonikScript += 'drop set (id=3,origin=' + set_origin + ');' + 'wait for event(origin=' + set_origin + ', wait on=' + set_origin + ', confirmed=all);\n'; var slonik=this.coordinator.createSlonik('DROP SET',slonikPreamble,slonikScript); slonik.run(); @@ -445,7 +450,8 @@ ExecuteScript.prototype.performInsert=function(node_id) { var fileWriter = new java.io.FileWriter(scriptFile); fileWriter.write('INSERT INTO disorder.do_customer(c_name) VALUES (disorder.digsyl(' + new java.util.Date().getTime() +',16));'); fileWriter.close(); - var slonikScript = 'EXECUTE SCRIPT( SET ID=1' + ', FILENAME=\'' + var slonikScript = 'echo \'ExecuteScript.prototype.performInsert\';\n'; + slonikScript += 'EXECUTE SCRIPT( SET ID=1' + ', FILENAME=\'' + scriptFile + '\' , EVENT NODE=' + node_id +' );\n' + 
'SYNC(id=' + node_id + ');\n' + 'wait for event(origin=' + node_id + ', confirmed=all, wait on=' + node_id+');\n'; diff --git a/clustertest/disorder/tests/FailNodeTest.js b/clustertest/disorder/tests/FailNodeTest.js index 56d59992..8ff14352 100644 --- a/clustertest/disorder/tests/FailNodeTest.js +++ b/clustertest/disorder/tests/FailNodeTest.js @@ -240,7 +240,8 @@ FailNodeTest.prototype.failNode=function(nodeId, expectFailure) { this.slonArray[nodeId-1].stop(); this.coordinator.join(this.slonArray[nodeId-1]); var slonikPreamble = this.getSlonikPreamble(); - var slonikScript = 'DROP NODE(id=' + nodeId + ',event node=1);\n'; + var slonikScript = 'echo \'FailNodeTest.prototype.failNode\';\n'; + slonikScript += 'DROP NODE(id=' + nodeId + ',event node=1);\n'; for(var idx=2; idx <= this.getNodeCount(); idx++) { if(idx == nodeId) { continue; @@ -281,7 +282,8 @@ FailNodeTest.prototype.checkNodeNotExists=function(check_node,nodeid) { FailNodeTest.prototype.reAddNode = function(node_id,origin,provider) { this.coordinator.log('reAddNode(' + node_id + ',' + provider + ')'); var slonikPreamble = this.getSlonikPreamble(); - var slonikScript = 'try {\n' + var slonikScript = 'echo \'FailNodeTest.prototype.reAddNode\';\n'; + slonikScript += 'try {\n' + 'uninstall node(id=' + node_id+');\n' + '}\n' + 'on error {\n' diff --git a/clustertest/disorder/tests/Failover.js b/clustertest/disorder/tests/Failover.js index 6f69851b..9376a6e7 100644 --- a/clustertest/disorder/tests/Failover.js +++ b/clustertest/disorder/tests/Failover.js @@ -216,7 +216,8 @@ Failover.prototype.runTest = function() { */ this.coordinator.log("PROGRESS:Failing from node 1 to 4"); var slonikPreamble = this.getSlonikPreamble(); - var slonikScript = 'drop path(server=1,client=4);\n' + var slonikScript = 'echo \'Failover.prototype.runTest\';\n'; + slonikScript += 'drop path(server=1,client=4);\n' +'drop path(server=4,client=1);'; var slonik = this.coordinator.createSlonik('drop paths to node 
4',slonikPreamble,slonikScript); slonik.run(); @@ -253,7 +254,8 @@ Failover.prototype.failNode=function(node_id,backup_id, expect_success) { this.coordinator.join(this.slonArray[node_id-1]); var slonikPreamble = this.getSlonikPreamble(); - var slonikScript = 'FAILOVER(id=' + node_id + ',backup node=' + backup_id +');\n'; + var slonikScript = 'echo \'Failover.prototype.failNode\';\n'; + slonikScript += 'FAILOVER(id=' + node_id + ',backup node=' + backup_id +');\n'; for(var idx=1; idx <= this.getNodeCount();idx++) { if(idx == node_id ) { continue; @@ -302,9 +304,8 @@ Failover.prototype.failNode=function(node_id,backup_id, expect_success) { Failover.prototype.addCompletePaths = function() { var slonikPre = this.getSlonikPreamble(); - var slonikScript = ''; - - for(var client=1; client <= this.getNodeCount(); client++) { + var slonikScript = 'echo \'Failover.prototype.addCompletePaths\';\n'; + for(var client=1; client <= this.getNodeCount(); client++) { for(var server=1; server <= this.getNodeCount(); server++) { if(server==client) { continue; @@ -326,7 +327,8 @@ Failover.prototype.addCompletePaths = function() { Failover.prototype.dropNode=function(node_id,event_node) { var slonikPreamble = this.getSlonikPreamble(); - var slonikScript = 'DROP NODE(id=' + node_id + ',event node=' + event_node +');\n'; + var slonikScript = 'echo \'Failover.prototype.dropNode\';\n'; + slonikScript += 'DROP NODE(id=' + node_id + ',event node=' + event_node +');\n'; for(var idx=1; idx <= this.getNodeCount(); idx++) { if(idx == node_id) { continue; diff --git a/clustertest/disorder/tests/InitialCopyFail.js b/clustertest/disorder/tests/InitialCopyFail.js index 696999e5..3da0e483 100644 --- a/clustertest/disorder/tests/InitialCopyFail.js +++ b/clustertest/disorder/tests/InitialCopyFail.js @@ -110,7 +110,8 @@ InitialCopyFail.prototype.runTest = function() { for(var idx=2; idx <= this.getNodeCount(); idx++) { - var slonikScript='subscribe set (id=1, provider=1, receiver=' + idx+ ');\n' + var 
slonikScript = 'echo \'InitialCopyFail.prototype.runTest\';\n'; + slonikScript += 'subscribe set (id=1, provider=1, receiver=' + idx+ ');\n' +'wait for event(origin=1, confirmed=' + idx + ', wait on=1);\n' +'sync(id=1);\n' +'wait for event(origin=1, confirmed=' + idx + ', wait on=1);\n'; diff --git a/clustertest/disorder/tests/LongTransaction.js b/clustertest/disorder/tests/LongTransaction.js index 78271394..af58ea3c 100644 --- a/clustertest/disorder/tests/LongTransaction.js +++ b/clustertest/disorder/tests/LongTransaction.js @@ -36,7 +36,8 @@ LongTransaction.prototype.runTest = function() { */ var txnConnection = this.startTransaction(); - var slonikScript = this.getAddTableSlonikScript(); + var slonikScript = 'echo \'LongTransaction.prototype.runTest\';\n'; + slonikScript += this.getAddTableSlonikScript(); var slonikPreamble = this.getSlonikPreamble(); var slonik = this.coordinator.createSlonik('add tables',slonikPreamble,slonikScript); slonik.run(); diff --git a/clustertest/disorder/tests/OmitCopy.js b/clustertest/disorder/tests/OmitCopy.js index c7bd90d4..e0b5d77a 100644 --- a/clustertest/disorder/tests/OmitCopy.js +++ b/clustertest/disorder/tests/OmitCopy.js @@ -133,7 +133,8 @@ OmitCopy.prototype.subscribeOmitCopy=function(origin,provider,subscriberNodeId,o this.testResults.assertCheck('history rows updated', updateCnt > 0,true); var slonikPreamble = this.getSlonikPreamble(); - var slonikScript="subscribe set(id=1, provider=" + provider+", receiver=" + subscriberNodeId+", omit copy=true, forward=yes);\n"; + var slonikScript = 'echo \'OmitCopy.prototype.subscribeOmitCopy\';\n'; + slonikScript += "subscribe set(id=1, provider=" + provider+", receiver=" + subscriberNodeId+", omit copy=true, forward=yes);\n"; slonikScript += ' wait for event (origin='+origin+', wait on='+provider+',confirmed=all);\n'; var slonik=this.coordinator.createSlonik('omit copy subscribe',slonikPreamble,slonikScript); diff --git a/clustertest/disorder/tests/RenameTests.js 
b/clustertest/disorder/tests/RenameTests.js index 76f536eb..6d547f84 100644 --- a/clustertest/disorder/tests/RenameTests.js +++ b/clustertest/disorder/tests/RenameTests.js @@ -116,12 +116,11 @@ RenameTests.prototype.executeScript=function(sql) { var fileWriter = new java.io.FileWriter(scriptFile); fileWriter.write(sql); fileWriter.close(); - var slonikScript = "EXECUTE SCRIPT(SET ID=3,FILENAME='" + scriptFile.getAbsolutePath() + var slonikScript = 'echo \'RenameTests.prototype.executeScript\';\n'; + slonikScript += "EXECUTE SCRIPT(SET ID=3,FILENAME='" + scriptFile.getAbsolutePath() + "',EVENT NODE=1);\n"; var slonik = this.coordinator.createSlonik('rename table',slonikPreamble,slonikScript); slonik.run(); this.coordinator.join(slonik); this.testResults.assertCheck('rename table 1 worked okay',slonik.getReturnCode(),0); - - } diff --git a/clustertest/disorder/tests/Unsubscribe.js b/clustertest/disorder/tests/Unsubscribe.js index 6521630e..dd94c54e 100644 --- a/clustertest/disorder/tests/Unsubscribe.js +++ b/clustertest/disorder/tests/Unsubscribe.js @@ -104,7 +104,8 @@ Unsubscribe.prototype.runTest = function() { Unsubscribe.prototype.unsubscribe=function(node_id,set_id,expect_success) { var slonikPreamble = this.getSlonikPreamble(); - var slonikScript='unsubscribe set(id=' + set_id + ',receiver=' + node_id + ');\n' + var slonikScript = 'echo \'Unsubscribe.prototype.unsubscribe\';\n'; + slonikScript +='unsubscribe set(id=' + set_id + ',receiver=' + node_id + ');\n' + 'wait for event(origin=' + node_id + ',wait on=' + node_id + ',confirmed=all);\n'; var slonik = this.coordinator.createSlonik('unsubscribe ' , slonikPreamble,slonikScript); slonik.run(); diff --git a/clustertest/disorder/tests/UnsubscribeBeforeEnable.js b/clustertest/disorder/tests/UnsubscribeBeforeEnable.js index 864e131a..5ec48322 100644 --- a/clustertest/disorder/tests/UnsubscribeBeforeEnable.js +++ b/clustertest/disorder/tests/UnsubscribeBeforeEnable.js @@ -68,7 +68,6 @@ 
UnsubscribeBeforeEnable.prototype.runTest = function() { slon1 = this.coordinator.createSlonLauncher("db1"); slon2 = this.coordinator.createSlonLauncher("db2"); - // // This handler will unsubscribe the set. // It should be invoked after the store subscription has been processed. From 2146966f46f3e56baef7ab400fdee07b03e662c1 Mon Sep 17 00:00:00 2001 From: Christopher Browne Date: Mon, 31 Jan 2011 12:07:13 -0500 Subject: [PATCH 03/11] Add slon.loglevel property value, per change to clustertest framework --- clustertest/conf/databases.properties.sample | 1 + 1 file changed, 1 insertion(+) diff --git a/clustertest/conf/databases.properties.sample b/clustertest/conf/databases.properties.sample index aeab1364..c3538f6d 100644 --- a/clustertest/conf/databases.properties.sample +++ b/clustertest/conf/databases.properties.sample @@ -67,6 +67,7 @@ database.db6.password.slony=slon slonik.path=/usr/local/pgsql8.3/bin/slonik slon.path=/usr/local/pgsql8.3/bin/slon +slon.loglevel=4 clustername=disorder_replica slony_dump.path=/usr/local/pgsql8.3/bin/slony1_dump.sh From 67e79d261413e026e60961e8866c13131c551084 Mon Sep 17 00:00:00 2001 From: Christopher Browne Date: Mon, 31 Jan 2011 12:26:27 -0500 Subject: [PATCH 04/11] Add support for slonconf files, and some samples --- clustertest/conf/databases.properties.sample | 8 +- clustertest/conf/slon.1.conf.sample | 115 +++++++++++++++++++ clustertest/conf/slon.2.conf.sample | 115 +++++++++++++++++++ clustertest/conf/slon.3.conf.sample | 115 +++++++++++++++++++ clustertest/conf/slon.4.conf.sample | 115 +++++++++++++++++++ clustertest/conf/slon.5.conf.sample | 115 +++++++++++++++++++ 6 files changed, 582 insertions(+), 1 deletion(-) create mode 100644 clustertest/conf/slon.1.conf.sample create mode 100644 clustertest/conf/slon.2.conf.sample create mode 100644 clustertest/conf/slon.3.conf.sample create mode 100644 clustertest/conf/slon.4.conf.sample create mode 100644 clustertest/conf/slon.5.conf.sample diff --git 
a/clustertest/conf/databases.properties.sample b/clustertest/conf/databases.properties.sample index c3538f6d..400017c3 100644 --- a/clustertest/conf/databases.properties.sample +++ b/clustertest/conf/databases.properties.sample @@ -1,3 +1,9 @@ +database.db1.slonconf=/var/lib/postgresql/slony1-engine.github/clustertest/conf/slon.1.conf +database.db2.slonconf=/var/lib/postgresql/slony1-engine.github/clustertest/conf/slon.2.conf +database.db3.slonconf=/var/lib/postgresql/slony1-engine.github/clustertest/conf/slon.3.conf +database.db4.slonconf=/var/lib/postgresql/slony1-engine.github/clustertest/conf/slon.4.conf +database.db5.slonconf=/var/lib/postgresql/slony1-engine.github/clustertest/conf/slon.5.conf + database.db1.host=localhost database.db1.dbname=test1 database.db1.port=5432 @@ -67,7 +73,7 @@ database.db6.password.slony=slon slonik.path=/usr/local/pgsql8.3/bin/slonik slon.path=/usr/local/pgsql8.3/bin/slon -slon.loglevel=4 +### slon.loglevel=4 clustername=disorder_replica slony_dump.path=/usr/local/pgsql8.3/bin/slony1_dump.sh diff --git a/clustertest/conf/slon.1.conf.sample b/clustertest/conf/slon.1.conf.sample new file mode 100644 index 00000000..a4e834a8 --- /dev/null +++ b/clustertest/conf/slon.1.conf.sample @@ -0,0 +1,115 @@ +# Sets how many cleanup cycles to run before a vacuum is done. +# Range: [0,100], default: 3 +vac_frequency=2 + +# Aging interval to use for deleting old events and for trimming +# data from sl_log_1/sl_log_2 +cleanup_interval="30 seconds" + +# Debug log level (higher value ==> more output). Range: [0,4], default 4 +log_level=2 + +# Check for updates at least this often in milliseconds. 
+# Range: [10-60000], default 2000 +sync_interval=800 + +# Maximum amount of time in milliseconds before issuing a SYNC event, +# This prevents a possible race condition in which the action sequence +# is bumped by the trigger while inserting the log row, which makes +# this bump is immediately visible to the sync thread, but +# the resulting log rows are not visible yet. If the sync is picked +# up by the subscriber, processed and finished before the transaction +# commits, this transaction's changes will not be replicated until the +# next SYNC. But if all application activity suddenly stops, +# there will be no more sequence bumps, so the high frequent -s check +# won't detect that. Thus, the need for sync_interval_timeout. +# Range: [0-120000], default 10000 +sync_interval_timeout=2000 + +# Maximum number of SYNC events to group together when/if a subscriber +# falls behind. SYNCs are batched only if there are that many available +# and if they are contiguous. Every other event type in between leads to +# a smaller batch. And if there is only one SYNC available, even -g60 +# will apply just that one. As soon as a subscriber catches up, it will +# apply every single SYNC by itself. +# Range: [0,100], default: 6 +sync_group_maxsize=5 + +# Size above which an sl_log_? row's log_cmddata is considered large. +# Up to 500 rows of this size are allowed in memory at once. Rows larger +# than that count into the sync_max_largemem space allocated and free'd +# on demand. +# Range: [1024,32768], default: 8192 +sync_max_rowsize=16384 + +# Maximum amount of memory allowed for large rows. Note that the algorithm +# will stop fetching rows AFTER this amount is exceeded, not BEFORE. This +# is done to ensure that a single row exceeding this limit alone does not +# stall replication. +# Range: [1048576,1073741824], default: 5242880 +sync_max_largemem=3276800 + +# If this parameter is 1, messages go both to syslog and the standard +# output. 
A value of 2 sends output only to syslog (some messages will +still go to the standard output/error). The default is 0, which means +syslog is off. +# Range: [0-2], default: 0 +syslog=1 + +# If true, include the process ID on each log line. Default is false. +log_pid=true + +# If true, include the timestamp on each log line. Default is true. +#log_timestamp=true + +# A strftime()-conformant format string for use with log timestamps. +# Default is '%Y-%m-%d %H:%M:%S %Z' +log_timestamp_format='%Y-%m-%d %H:%M:%S' + +# An interval in seconds at which the remote worker will output the +# query used to select log rows together with it's query plan. The +# default value of 0 turns this feature off. +# Range: [0-86400], default: 0 +#explain_interval=0 + +# Where to write the pid file. Default: no pid file +pid_file='/var/lib/postgresql/slony1-engine.github/clustertest/conf/slon.1.pid' + +# Sets the syslog "facility" to be used when syslog enabled. Valid +# values are LOCAL0, LOCAL1, LOCAL2, LOCAL3, LOCAL4, LOCAL5, LOCAL6, LOCAL7. +syslog_facility=LOCAL0 + +# Sets the program name used to identify slon messages in syslog. +syslog_ident=slon + +# Set the cluster name that this instance of slon is running against +# default is to read it off the command line +cluster_name='disorder_replica' + +# Set slon's connection info, default is to read it off the command line +#conn_info='host=/tmp port=5432 user=slony' + +# maximum time planned for grouped SYNCs +# If replication is behind, slon will try to increase numbers of +# syncs done targetting that they should take this quantity of +# time to process. in ms +# Range [10000,600000], default 60000. +desired_sync_time=6000 + +# Execute the following SQL on each node at slon connect time +# useful to set logging levels, or to tune the planner/memory +# settings. 
You can specify multiple statements by separating +# them with a ; +sql_on_connection="SET log_min_duration_statement TO '1000';" + +# Command to run upon committing a log archive. +# This command is passed one parameter, namely the full pathname of +# the archive file +#command_on_logarchive="/usr/local/bin/movearchivetoarchive" + +# A PostgreSQL value compatible with ::interval which indicates how +# far behind this node should lag its providers. +# lag_interval="8 minutes" + +# Directory in which to stow sync archive files +# archive_dir="/tmp/somewhere" diff --git a/clustertest/conf/slon.2.conf.sample b/clustertest/conf/slon.2.conf.sample new file mode 100644 index 00000000..a4e834a8 --- /dev/null +++ b/clustertest/conf/slon.2.conf.sample @@ -0,0 +1,115 @@ +# Sets how many cleanup cycles to run before a vacuum is done. +# Range: [0,100], default: 3 +vac_frequency=2 + +# Aging interval to use for deleting old events and for trimming +# data from sl_log_1/sl_log_2 +cleanup_interval="30 seconds" + +# Debug log level (higher value ==> more output). Range: [0,4], default 4 +log_level=2 + +# Check for updates at least this often in milliseconds. +# Range: [10-60000], default 2000 +sync_interval=800 + +# Maximum amount of time in milliseconds before issuing a SYNC event, +# This prevents a possible race condition in which the action sequence +# is bumped by the trigger while inserting the log row, which makes +# this bump is immediately visible to the sync thread, but +# the resulting log rows are not visible yet. If the sync is picked +# up by the subscriber, processed and finished before the transaction +# commits, this transaction's changes will not be replicated until the +# next SYNC. But if all application activity suddenly stops, +# there will be no more sequence bumps, so the high frequent -s check +# won't detect that. Thus, the need for sync_interval_timeout. 
+# Range: [0-120000], default 10000 +sync_interval_timeout=2000 + +# Maximum number of SYNC events to group together when/if a subscriber +# falls behind. SYNCs are batched only if there are that many available +# and if they are contiguous. Every other event type in between leads to +# a smaller batch. And if there is only one SYNC available, even -g60 +# will apply just that one. As soon as a subscriber catches up, it will +# apply every single SYNC by itself. +# Range: [0,100], default: 6 +sync_group_maxsize=5 + +# Size above which an sl_log_? row's log_cmddata is considered large. +# Up to 500 rows of this size are allowed in memory at once. Rows larger +# than that count into the sync_max_largemem space allocated and free'd +# on demand. +# Range: [1024,32768], default: 8192 +sync_max_rowsize=16384 + +# Maximum amount of memory allowed for large rows. Note that the algorithm +# will stop fetching rows AFTER this amount is exceeded, not BEFORE. This +# is done to ensure that a single row exceeding this limit alone does not +# stall replication. +# Range: [1048576,1073741824], default: 5242880 +sync_max_largemem=3276800 + +# If this parameter is 1, messages go both to syslog and the standard +# output. A value of 2 sends output only to syslog (some messages will +# still go to the standard output/error). The default is 0, which means +# syslog is off. +# Range: [0-2], default: 0 +syslog=1 + +# If true, include the process ID on each log line. Default is false. +log_pid=true + +# If true, include the timestamp on each log line. Default is true. +#log_timestamp=true + +# A strftime()-conformant format string for use with log timestamps. +# Default is '%Y-%m-%d %H:%M:%S %Z' +log_timestamp_format='%Y-%m-%d %H:%M:%S' + +# An interval in seconds at which the remote worker will output the +# query used to select log rows together with it's query plan. The +# default value of 0 turns this feature off. 
+# Range: [0-86400], default: 0 +#explain_interval=0 + +# Where to write the pid file. Default: no pid file +pid_file='/var/lib/postgresql/slony1-engine.github/clustertest/conf/slon.5.pid' + +# Sets the syslog "facility" to be used when syslog enabled. Valid +# values are LOCAL0, LOCAL1, LOCAL2, LOCAL3, LOCAL4, LOCAL5, LOCAL6, LOCAL7. +syslog_facility=LOCAL0 + +# Sets the program name used to identify slon messages in syslog. +syslog_ident=slon + +# Set the cluster name that this instance of slon is running against +# default is to read it off the command line +cluster_name='disorder_replica' + +# Set slon's connection info, default is to read it off the command line +#conn_info='host=/tmp port=5432 user=slony' + +# maximum time planned for grouped SYNCs +# If replication is behind, slon will try to increase numbers of +# syncs done targetting that they should take this quantity of +# time to process. in ms +# Range [10000,600000], default 60000. +desired_sync_time=6000 + +# Execute the following SQL on each node at slon connect time +# useful to set logging levels, or to tune the planner/memory +# settings. You can specify multiple statements by separating +# them with a ; +sql_on_connection="SET log_min_duration_statement TO '1000';" + +# Command to run upon committing a log archive. +# This command is passed one parameter, namely the full pathname of +# the archive file +#command_on_logarchive="/usr/local/bin/movearchivetoarchive" + +# A PostgreSQL value compatible with ::interval which indicates how +# far behind this node should lag its providers. +# lag_interval="8 minutes" + +# Directory in which to stow sync archive files +# archive_dir="/tmp/somewhere" diff --git a/clustertest/conf/slon.3.conf.sample b/clustertest/conf/slon.3.conf.sample new file mode 100644 index 00000000..a4e834a8 --- /dev/null +++ b/clustertest/conf/slon.3.conf.sample @@ -0,0 +1,115 @@ +# Sets how many cleanup cycles to run before a vacuum is done. 
+# Range: [0,100], default: 3 +vac_frequency=2 + +# Aging interval to use for deleting old events and for trimming +# data from sl_log_1/sl_log_2 +cleanup_interval="30 seconds" + +# Debug log level (higher value ==> more output). Range: [0,4], default 4 +log_level=2 + +# Check for updates at least this often in milliseconds. +# Range: [10-60000], default 2000 +sync_interval=800 + +# Maximum amount of time in milliseconds before issuing a SYNC event, +# This prevents a possible race condition in which the action sequence +# is bumped by the trigger while inserting the log row, which makes +# this bump is immediately visible to the sync thread, but +# the resulting log rows are not visible yet. If the sync is picked +# up by the subscriber, processed and finished before the transaction +# commits, this transaction's changes will not be replicated until the +# next SYNC. But if all application activity suddenly stops, +# there will be no more sequence bumps, so the high frequent -s check +# won't detect that. Thus, the need for sync_interval_timeout. +# Range: [0-120000], default 10000 +sync_interval_timeout=2000 + +# Maximum number of SYNC events to group together when/if a subscriber +# falls behind. SYNCs are batched only if there are that many available +# and if they are contiguous. Every other event type in between leads to +# a smaller batch. And if there is only one SYNC available, even -g60 +# will apply just that one. As soon as a subscriber catches up, it will +# apply every single SYNC by itself. +# Range: [0,100], default: 6 +sync_group_maxsize=5 + +# Size above which an sl_log_? row's log_cmddata is considered large. +# Up to 500 rows of this size are allowed in memory at once. Rows larger +# than that count into the sync_max_largemem space allocated and free'd +# on demand. +# Range: [1024,32768], default: 8192 +sync_max_rowsize=16384 + +# Maximum amount of memory allowed for large rows. 
Note that the algorithm +# will stop fetching rows AFTER this amount is exceeded, not BEFORE. This +# is done to ensure that a single row exceeding this limit alone does not +# stall replication. +# Range: [1048576,1073741824], default: 5242880 +sync_max_largemem=3276800 + +# If this parameter is 1, messages go both to syslog and the standard +# output. A value of 2 sends output only to syslog (some messages will +# still go to the standard output/error). The default is 0, which means +# syslog is off. +# Range: [0-2], default: 0 +syslog=1 + +# If true, include the process ID on each log line. Default is false. +log_pid=true + +# If true, include the timestamp on each log line. Default is true. +#log_timestamp=true + +# A strftime()-conformant format string for use with log timestamps. +# Default is '%Y-%m-%d %H:%M:%S %Z' +log_timestamp_format='%Y-%m-%d %H:%M:%S' + +# An interval in seconds at which the remote worker will output the +# query used to select log rows together with it's query plan. The +# default value of 0 turns this feature off. +# Range: [0-86400], default: 0 +#explain_interval=0 + +# Where to write the pid file. Default: no pid file +pid_file='/var/lib/postgresql/slony1-engine.github/clustertest/conf/slon.5.pid' + +# Sets the syslog "facility" to be used when syslog enabled. Valid +# values are LOCAL0, LOCAL1, LOCAL2, LOCAL3, LOCAL4, LOCAL5, LOCAL6, LOCAL7. +syslog_facility=LOCAL0 + +# Sets the program name used to identify slon messages in syslog. +syslog_ident=slon + +# Set the cluster name that this instance of slon is running against +# default is to read it off the command line +cluster_name='disorder_replica' + +# Set slon's connection info, default is to read it off the command line +#conn_info='host=/tmp port=5432 user=slony' + +# maximum time planned for grouped SYNCs +# If replication is behind, slon will try to increase numbers of +# syncs done targetting that they should take this quantity of +# time to process. 
in ms +# Range [10000,600000], default 60000. +desired_sync_time=6000 + +# Execute the following SQL on each node at slon connect time +# useful to set logging levels, or to tune the planner/memory +# settings. You can specify multiple statements by separating +# them with a ; +sql_on_connection="SET log_min_duration_statement TO '1000';" + +# Command to run upon committing a log archive. +# This command is passed one parameter, namely the full pathname of +# the archive file +#command_on_logarchive="/usr/local/bin/movearchivetoarchive" + +# A PostgreSQL value compatible with ::interval which indicates how +# far behind this node should lag its providers. +# lag_interval="8 minutes" + +# Directory in which to stow sync archive files +# archive_dir="/tmp/somewhere" diff --git a/clustertest/conf/slon.4.conf.sample b/clustertest/conf/slon.4.conf.sample new file mode 100644 index 00000000..a4e834a8 --- /dev/null +++ b/clustertest/conf/slon.4.conf.sample @@ -0,0 +1,115 @@ +# Sets how many cleanup cycles to run before a vacuum is done. +# Range: [0,100], default: 3 +vac_frequency=2 + +# Aging interval to use for deleting old events and for trimming +# data from sl_log_1/sl_log_2 +cleanup_interval="30 seconds" + +# Debug log level (higher value ==> more output). Range: [0,4], default 4 +log_level=2 + +# Check for updates at least this often in milliseconds. +# Range: [10-60000], default 2000 +sync_interval=800 + +# Maximum amount of time in milliseconds before issuing a SYNC event, +# This prevents a possible race condition in which the action sequence +# is bumped by the trigger while inserting the log row, which makes +# this bump is immediately visible to the sync thread, but +# the resulting log rows are not visible yet. If the sync is picked +# up by the subscriber, processed and finished before the transaction +# commits, this transaction's changes will not be replicated until the +# next SYNC. 
But if all application activity suddenly stops, +# there will be no more sequence bumps, so the high frequent -s check +# won't detect that. Thus, the need for sync_interval_timeout. +# Range: [0-120000], default 10000 +sync_interval_timeout=2000 + +# Maximum number of SYNC events to group together when/if a subscriber +# falls behind. SYNCs are batched only if there are that many available +# and if they are contiguous. Every other event type in between leads to +# a smaller batch. And if there is only one SYNC available, even -g60 +# will apply just that one. As soon as a subscriber catches up, it will +# apply every single SYNC by itself. +# Range: [0,100], default: 6 +sync_group_maxsize=5 + +# Size above which an sl_log_? row's log_cmddata is considered large. +# Up to 500 rows of this size are allowed in memory at once. Rows larger +# than that count into the sync_max_largemem space allocated and free'd +# on demand. +# Range: [1024,32768], default: 8192 +sync_max_rowsize=16384 + +# Maximum amount of memory allowed for large rows. Note that the algorithm +# will stop fetching rows AFTER this amount is exceeded, not BEFORE. This +# is done to ensure that a single row exceeding this limit alone does not +# stall replication. +# Range: [1048576,1073741824], default: 5242880 +sync_max_largemem=3276800 + +# If this parameter is 1, messages go both to syslog and the standard +# output. A value of 2 sends output only to syslog (some messages will +# still go to the standard output/error). The default is 0, which means +# syslog is off. +# Range: [0-2], default: 0 +syslog=1 + +# If true, include the process ID on each log line. Default is false. +log_pid=true + +# If true, include the timestamp on each log line. Default is true. +#log_timestamp=true + +# A strftime()-conformant format string for use with log timestamps. 
+# Default is '%Y-%m-%d %H:%M:%S %Z' +log_timestamp_format='%Y-%m-%d %H:%M:%S' + +# An interval in seconds at which the remote worker will output the +# query used to select log rows together with it's query plan. The +# default value of 0 turns this feature off. +# Range: [0-86400], default: 0 +#explain_interval=0 + +# Where to write the pid file. Default: no pid file +pid_file='/var/lib/postgresql/slony1-engine.github/clustertest/conf/slon.5.pid' + +# Sets the syslog "facility" to be used when syslog enabled. Valid +# values are LOCAL0, LOCAL1, LOCAL2, LOCAL3, LOCAL4, LOCAL5, LOCAL6, LOCAL7. +syslog_facility=LOCAL0 + +# Sets the program name used to identify slon messages in syslog. +syslog_ident=slon + +# Set the cluster name that this instance of slon is running against +# default is to read it off the command line +cluster_name='disorder_replica' + +# Set slon's connection info, default is to read it off the command line +#conn_info='host=/tmp port=5432 user=slony' + +# maximum time planned for grouped SYNCs +# If replication is behind, slon will try to increase numbers of +# syncs done targetting that they should take this quantity of +# time to process. in ms +# Range [10000,600000], default 60000. +desired_sync_time=6000 + +# Execute the following SQL on each node at slon connect time +# useful to set logging levels, or to tune the planner/memory +# settings. You can specify multiple statements by separating +# them with a ; +sql_on_connection="SET log_min_duration_statement TO '1000';" + +# Command to run upon committing a log archive. +# This command is passed one parameter, namely the full pathname of +# the archive file +#command_on_logarchive="/usr/local/bin/movearchivetoarchive" + +# A PostgreSQL value compatible with ::interval which indicates how +# far behind this node should lag its providers. 
+# lag_interval="8 minutes" + +# Directory in which to stow sync archive files +# archive_dir="/tmp/somewhere" diff --git a/clustertest/conf/slon.5.conf.sample b/clustertest/conf/slon.5.conf.sample new file mode 100644 index 00000000..a4e834a8 --- /dev/null +++ b/clustertest/conf/slon.5.conf.sample @@ -0,0 +1,115 @@ +# Sets how many cleanup cycles to run before a vacuum is done. +# Range: [0,100], default: 3 +vac_frequency=2 + +# Aging interval to use for deleting old events and for trimming +# data from sl_log_1/sl_log_2 +cleanup_interval="30 seconds" + +# Debug log level (higher value ==> more output). Range: [0,4], default 4 +log_level=2 + +# Check for updates at least this often in milliseconds. +# Range: [10-60000], default 2000 +sync_interval=800 + +# Maximum amount of time in milliseconds before issuing a SYNC event, +# This prevents a possible race condition in which the action sequence +# is bumped by the trigger while inserting the log row, which makes +# this bump is immediately visible to the sync thread, but +# the resulting log rows are not visible yet. If the sync is picked +# up by the subscriber, processed and finished before the transaction +# commits, this transaction's changes will not be replicated until the +# next SYNC. But if all application activity suddenly stops, +# there will be no more sequence bumps, so the high frequent -s check +# won't detect that. Thus, the need for sync_interval_timeout. +# Range: [0-120000], default 10000 +sync_interval_timeout=2000 + +# Maximum number of SYNC events to group together when/if a subscriber +# falls behind. SYNCs are batched only if there are that many available +# and if they are contiguous. Every other event type in between leads to +# a smaller batch. And if there is only one SYNC available, even -g60 +# will apply just that one. As soon as a subscriber catches up, it will +# apply every single SYNC by itself. +# Range: [0,100], default: 6 +sync_group_maxsize=5 + +# Size above which an sl_log_? 
row's log_cmddata is considered large. +# Up to 500 rows of this size are allowed in memory at once. Rows larger +# than that count into the sync_max_largemem space allocated and free'd +# on demand. +# Range: [1024,32768], default: 8192 +sync_max_rowsize=16384 + +# Maximum amount of memory allowed for large rows. Note that the algorithm +# will stop fetching rows AFTER this amount is exceeded, not BEFORE. This +# is done to ensure that a single row exceeding this limit alone does not +# stall replication. +# Range: [1048576,1073741824], default: 5242880 +sync_max_largemem=3276800 + +# If this parameter is 1, messages go both to syslog and the standard +# output. A value of 2 sends output only to syslog (some messages will +# still go to the standard output/error). The default is 0, which means +# syslog is off. +# Range: [0-2], default: 0 +syslog=1 + +# If true, include the process ID on each log line. Default is false. +log_pid=true + +# If true, include the timestamp on each log line. Default is true. +#log_timestamp=true + +# A strftime()-conformant format string for use with log timestamps. +# Default is '%Y-%m-%d %H:%M:%S %Z' +log_timestamp_format='%Y-%m-%d %H:%M:%S' + +# An interval in seconds at which the remote worker will output the +# query used to select log rows together with it's query plan. The +# default value of 0 turns this feature off. +# Range: [0-86400], default: 0 +#explain_interval=0 + +# Where to write the pid file. Default: no pid file +pid_file='/var/lib/postgresql/slony1-engine.github/clustertest/conf/slon.5.pid' + +# Sets the syslog "facility" to be used when syslog enabled. Valid +# values are LOCAL0, LOCAL1, LOCAL2, LOCAL3, LOCAL4, LOCAL5, LOCAL6, LOCAL7. +syslog_facility=LOCAL0 + +# Sets the program name used to identify slon messages in syslog. 
+syslog_ident=slon + +# Set the cluster name that this instance of slon is running against +# default is to read it off the command line +cluster_name='disorder_replica' + +# Set slon's connection info, default is to read it off the command line +#conn_info='host=/tmp port=5432 user=slony' + +# maximum time planned for grouped SYNCs +# If replication is behind, slon will try to increase numbers of +# syncs done targetting that they should take this quantity of +# time to process. in ms +# Range [10000,600000], default 60000. +desired_sync_time=6000 + +# Execute the following SQL on each node at slon connect time +# useful to set logging levels, or to tune the planner/memory +# settings. You can specify multiple statements by separating +# them with a ; +sql_on_connection="SET log_min_duration_statement TO '1000';" + +# Command to run upon committing a log archive. +# This command is passed one parameter, namely the full pathname of +# the archive file +#command_on_logarchive="/usr/local/bin/movearchivetoarchive" + +# A PostgreSQL value compatible with ::interval which indicates how +# far behind this node should lag its providers. 
+# lag_interval="8 minutes" + +# Directory in which to stow sync archive files +# archive_dir="/tmp/somewhere" From 73a823ede533b90affdbdd11fd5ca5e27d0c023f Mon Sep 17 00:00:00 2001 From: Christopher Browne Date: Tue, 1 Feb 2011 15:55:27 -0500 Subject: [PATCH 05/11] Add extra logging to various processes --- clustertest/disorder/tests/BasicTest.js | 37 +++++++++----- clustertest/disorder/tests/BigBacklog.js | 4 ++ clustertest/disorder/tests/CloneNode.js | 10 ++++ clustertest/disorder/tests/EmptySet.js | 10 +++- clustertest/disorder/tests/ExecuteScript.js | 50 ++++++++++++++----- clustertest/disorder/tests/FailNodeTest.js | 46 +++++++++++------ clustertest/disorder/tests/Failover.js | 9 ++-- clustertest/disorder/tests/HeavyLoadTest.js | 17 +++++-- clustertest/disorder/tests/InitialCopyFail.js | 7 ++- clustertest/disorder/tests/LogShipping.js | 13 +++-- clustertest/disorder/tests/LongTransaction.js | 10 +++- clustertest/disorder/tests/MoveSet.js | 27 +++++----- clustertest/disorder/tests/MultipleOrigins.js | 10 +++- clustertest/disorder/tests/OmitCopy.js | 2 + clustertest/disorder/tests/RenameTests.js | 2 + clustertest/disorder/tests/RestartTest.js | 4 +- clustertest/disorder/tests/SlonKilling.js | 14 ++++-- .../disorder/tests/SubscribeUnderLoad.js | 9 +++- clustertest/disorder/tests/Unsubscribe.js | 6 ++- .../disorder/tests/UnsubscribeBeforeEnable.js | 4 +- 20 files changed, 209 insertions(+), 82 deletions(-) diff --git a/clustertest/disorder/tests/BasicTest.js b/clustertest/disorder/tests/BasicTest.js index cbf49b62..0d1b110e 100644 --- a/clustertest/disorder/tests/BasicTest.js +++ b/clustertest/disorder/tests/BasicTest.js @@ -71,8 +71,8 @@ BasicTest.prototype.getSlonikPreamble = function() { * Nothing will be subscribed though. 
*/ BasicTest.prototype.setupReplication = function() { - - var result = 0; + this.coordinator.log("setupReplication start"); + var result = 0; var slonikPre = this.getSlonikPreamble(); var slonikScript = 'echo \'BasicTest.prototype.setupReplication\';\n'; for ( var idx = 1; idx <= this.getNodeCount(); idx++) { @@ -143,6 +143,7 @@ BasicTest.prototype.setupReplication = function() { Packages.info.slony.clustertest.testcoordinator.Coordinator.EVENT_FINISHED, finishedObserver); + this.coordinator.log("setupReplication end"); return slonik.getReturnCode(); @@ -306,6 +307,7 @@ BasicTest.prototype.createDb=function(dbnames) { BasicTest.prototype.postSeedSetup=function(dbnamelist) { var schemasql = this.coordinator.readFile("disorder/sql/disorder-2.sql"); var psqlArray=[]; + this.coordinator.log("BasicTest.prototype.postSeedSetup begin"); for(var idx=0; idx < dbnamelist.length; idx++) { psql = this.coordinator.createPsqlCommand(dbnamelist[idx], schemasql); psql.run(); @@ -315,6 +317,7 @@ BasicTest.prototype.postSeedSetup=function(dbnamelist) { for(var idx=0; idx < dbnamelist.length; idx++) { this.coordinator.join(psqlArray[idx]); } + this.coordinator.log("BasicTest.prototype.postSeedSetup complete"); } @@ -371,6 +374,7 @@ BasicTest.prototype.getSyncWaitTime = function() { * */ BasicTest.prototype.slonikSync = function(setid, originid) { + this.coordinator.log("BasicTest.prototype.slonikSync - Set["+ setid + "] origin["+originid + "] - start"); var slonikPre = this.getSlonikPreamble(); var slonikScript = 'echo \'BasicTest.prototype.slonikSync\';\n'; slonikScript += ' sync(id=' + originid + ');\n'; @@ -424,12 +428,14 @@ BasicTest.prototype.slonikSync = function(setid, originid) { Packages.info.slony.clustertest.testcoordinator.Coordinator.EVENT_FINISHED, finishedObserver); + this.coordinator.log("BasicTest.prototype.slonikSync - Set["+ setid + "] origin["+originid + "] - complete"); return slonik.getReturnCode(); } /** * Moves a set (setid) from the origin node to the 
destination node. */ BasicTest.prototype.moveSet = function(setid, origin_node, destination_node) { + this.coordinator.log("BasicTest.prototype.moveSet - Set["+ setid + "] origin["+origin_node + "] - destination["+ destination_node+"] - start"); var preamble = this.getSlonikPreamble(); var slonikScript = 'echo \'BasicTest.prototype.moveSet\';\n'; @@ -443,6 +449,7 @@ BasicTest.prototype.moveSet = function(setid, origin_node, destination_node) { this.coordinator.join(slonik); this.testResults.assertCheck('move set succeeded', slonik.getReturnCode(), 0); + this.coordinator.log("BasicTest.prototype.moveSet - Set["+ setid + "] origin["+origin_node + "] - destination["+ destination_node+"] - complete"); return slonik.getReturnCode(); } @@ -462,6 +469,7 @@ BasicTest.prototype.moveSet = function(setid, origin_node, destination_node) { BasicTest.prototype.subscribeSetBackground = function(setid, origin_node, provider_node, subscriber_nodes) { + this.coordinator.log("BasicTest.prototype.subscribeSetBackground - begin"); var slonikScript = 'echo \'BasicTest.prototype.subscribeSetBackground\';\n'; var preamble = this.getSlonikPreamble(); var slonikList = []; @@ -511,6 +519,7 @@ BasicTest.prototype.subscribeSetBackground = function(setid, origin_node, } + this.coordinator.log("BasicTest.prototype.subscribeSetBackground - complete"); return slonikList; } @@ -531,6 +540,7 @@ BasicTest.prototype.subscribeSet = function(set_id, origin_node,provider_node, * leave the databases in a clean state for the next test. 
*/ BasicTest.prototype.teardownSlony = function() { + this.coordinator.log("BasicTest.prototype.teardownSlony - begin"); var slonikPre = this.getSlonikPreamble(); var slonikScript = 'echo \'BasicTest.prototype.teardownSlony\';\n'; for ( var idx = 1; idx <= this.getNodeCount(); idx++) { @@ -563,6 +573,7 @@ BasicTest.prototype.teardownSlony = function() { finishedObserver); slonik.run(); + this.coordinator.log("BasicTest.prototype.teardownSlony - complete"); this.coordinator.join(slonik); } @@ -571,26 +582,27 @@ BasicTest.prototype.teardownSlony = function() { BasicTest.prototype.generateLoad = function(set_origin) { + this.coordinator.log("BasicTest.prototype.generateLoad - origin[" + set_origin + "] - start"); var disorderClientJs = this.coordinator.readFile('disorder/client/disorder.js'); disorderClientJs+= this.coordinator.readFile('disorder/client/run_fixed_load.js'); var load = this.coordinator.clientScript(disorderClientJs,this.getCurrentOrigin()); load.run(); + this.coordinator.log("BasicTest.prototype.generateLoad - origin[" + set_origin + "] - complete"); return load; } BasicTest.prototype.seedData = function(scaling) { - - this.coordinator.log("Seeding data with scaling " + scaling); + this.coordinator.log("Seeding data with scaling " + scaling + " - begin"); var populatePsql = this.coordinator.createPsqlCommand('db1', 'SET SEARCH_PATH=disorder,public; SELECT disorder.populate(' + scaling + ');'); populatePsql.run(); + this.coordinator.log("Seeding data with scaling " + scaling + " - complete"); return populatePsql; - } BasicTest.prototype.compareDb=function(lhs_db, rhs_db) { //Compare the results. 
- this.coordinator.log('comparing' + lhs_db + 'rhs_db'); + this.coordinator.log("BasicTest.prototype.compareDb ["+lhs_db + ","+rhs_db + "] - begin"); var queryList = [ ['SELECT c_id,c_name,c_total_orders,c_total_value FROM disorder.do_customer order by c_id','c_id'] ,['SELECT i_id,i_name,i_price,i_in_production FROM disorder.do_item order by i_id','i_id'] @@ -620,8 +632,7 @@ BasicTest.prototype.compareDb=function(lhs_db, rhs_db) { //At some point all the compare could be done concurrently? this.coordinator.join(compareOp); } - - + this.coordinator.log("BasicTest.prototype.compareDb ["+lhs_db + ","+rhs_db + "] - complete"); } BasicTest.prototype.getClusterName = function () { @@ -638,6 +649,7 @@ BasicTest.prototype.getClusterName = function () { * */ BasicTest.prototype.createSecondSet=function(origin) { + this.coordinator.log("BasicTest.prototype.createSecondSet [" + origin + "] - begin"); var slonikPreamble = this.getSlonikPreamble(); var slonikScript = 'echo \'BasicTest.prototype.createSecondSet\';\n'; slonikScript += 'create set(id=2, origin=' + origin + ',comment=\'second set\');\n' @@ -653,7 +665,7 @@ BasicTest.prototype.createSecondSet=function(origin) { slonik.run(); this.coordinator.join(slonik); this.testResults.assertCheck('create set to succeeded',slonik.getReturnCode(),0); - + this.coordinator.log("BasicTest.prototype.createSecondSet [" + origin + "] - complete"); } /** @@ -684,6 +696,7 @@ BasicTest.prototype.measureLag = function(event_node, lag_node) { } BasicTest.prototype.startDataChecks=function(node_id) { + this.coordinator.log("BasicTest.prototype.startDataChecks node[" + node_id + "] - begin"); var disorderClientJs = this.coordinator.readFile('disorder/client/disorder.js'); disorderClientJs+= this.coordinator.readFile('disorder/client/run_check_load.js'); var load = this.coordinator.clientScript(disorderClientJs,'db' + node_id); @@ -696,8 +709,8 @@ BasicTest.prototype.startDataChecks=function(node_id) { 
this.coordinator.registerObserver(load,Packages.info.slony.clustertest.testcoordinator.Coordinator.EVENT_ERROR, new Packages.info.slony.clustertest.testcoordinator.script.ExecutionObserver(failOnError)); load.run(); + this.coordinator.log("BasicTest.prototype.startDataChecks node[" + node_id + "] - complete"); return load; - } /** @@ -721,9 +734,7 @@ BasicTest.prototype.verifyReadOnly=function(node_id) { stat.close(); connection.close(); } - - - + this.coordinator.log('verifying read only status of node ' + node_id + " - complete"); } BasicTest.prototype.getCurrentOrigin=function() { return this.currentOrigin; diff --git a/clustertest/disorder/tests/BigBacklog.js b/clustertest/disorder/tests/BigBacklog.js index cfb71720..8056c7ec 100644 --- a/clustertest/disorder/tests/BigBacklog.js +++ b/clustertest/disorder/tests/BigBacklog.js @@ -16,6 +16,7 @@ BigBacklogTest.prototype.constructor = BigBacklogTest; BigBacklogTest.prototype.runTest = function() { + this.coordinator.log("BigBacklogTest.prototype.runTest - start"); this.testResults.newGroup("Big Backlog "); this.setupReplication(); @@ -113,9 +114,11 @@ BigBacklogTest.prototype.runTest = function() { this.compareDb('db4','db6'); this.dropDb(['db6']); + this.coordinator.log("BigBacklogTest.prototype.runTest - complete"); } BigBacklogTest.prototype.startSlons=function() { + this.coordinator.log("BigBacklogTest.prototype.startSlons - begin"); for(var idx=1; idx <= this.getNodeCount(); idx++) { @@ -128,4 +131,5 @@ BigBacklogTest.prototype.startSlons=function() { } this.slonArray[idx-1].run(); } + this.coordinator.log("BigBacklogTest.prototype.startSlons - complete"); } \ No newline at end of file diff --git a/clustertest/disorder/tests/CloneNode.js b/clustertest/disorder/tests/CloneNode.js index 442d7742..b191eac6 100644 --- a/clustertest/disorder/tests/CloneNode.js +++ b/clustertest/disorder/tests/CloneNode.js @@ -13,6 +13,8 @@ CloneNode.prototype.constructor = EmptySet; CloneNode.prototype.runTest = function() { + 
this.coordinator.log("CloneNode.prototype.runTest - begin"); + this.testResults.newGroup("Clone Node"); this.setupReplication(); @@ -22,27 +24,33 @@ CloneNode.prototype.runTest = function() { slonArray[idx-1].run(); } this.addTables(); + this.coordinator.log("CloneNode.prototype.runTest - subscribe sets"); this.subscribeSet(1, 1,1,[2]); + this.coordinator.log("CloneNode.prototype.runTest - CLONE PREPARE"); var preamble = this.getSlonikPreamble(); var script = 'CLONE PREPARE(id=6, provider=2);'; var slonik = this.coordinator.createSlonik('clone',preamble,script); slonik.run(); this.coordinator.join(slonik); + this.coordinator.log("CloneNode.prototype.runTest - pgdump_db2 to generate new node"); var dumpFile = java.io.File.createTempFile('pgdump_db2','.sql'); //Now pg_dump the database. var pg_dump = this.coordinator.createPgDumpCommand("db2",dumpFile.getAbsolutePath(),null,false) pg_dump.run(); this.coordinator.join(pg_dump); + this.coordinator.log("CloneNode.prototype.runTest - generate node db6"); var createDb = this.coordinator.createCreateDb('db6'); createDb.run(); this.coordinator.join(createDb); this.testResults.assertCheck('db6 created okay',createDb.getReturnCode(),0); + this.coordinator.log("CloneNode.prototype.runTest - load dump to node db6"); var restorePsql = this.coordinator.createPsqlCommand("db6",dumpFile); restorePsql.run(); this.coordinator.join(restorePsql); this.testResults.assertCheck('database restored okay',restorePsql.getReturnCode(),0); + this.coordinator.log("CloneNode.prototype.runTest - add db6 to cluster"); //Now run slonik to finish things off. 
@@ -81,6 +89,7 @@ CloneNode.prototype.runTest = function() { slon6.stop(); this.coordinator.join(slon6); + this.coordinator.log("CloneNode.prototype.runTest - db6 now in cluster"); for(var idx=1; idx <= this.getNodeCount(); idx++) { slonArray[idx-1].stop(); @@ -89,6 +98,7 @@ CloneNode.prototype.runTest = function() { var dropDb = this.coordinator.createDropDbCommand('db6'); dropDb.run(); this.coordinator.join(dropDb); + this.coordinator.log("CloneNode.prototype.runTest - complete"); } diff --git a/clustertest/disorder/tests/EmptySet.js b/clustertest/disorder/tests/EmptySet.js index 94ef62d8..7b4850c1 100644 --- a/clustertest/disorder/tests/EmptySet.js +++ b/clustertest/disorder/tests/EmptySet.js @@ -9,11 +9,13 @@ EmptySet.prototype = new BasicTest(); EmptySet.prototype.constructor = EmptySet; EmptySet.prototype.runTest = function() { + this.coordinator.log("EmptySet.prototype.runTest - begin"); this.testResults.newGroup("Empty set"); //this.prepareDb(['db1','db2','db3','db4','db5']); this.setupReplication(); + this.coordinator.log("EmptySet.prototype.runTest - start slons"); /** * Start the slons. */ @@ -23,6 +25,7 @@ EmptySet.prototype.runTest = function() { slonArray[idx-1].run(); } + this.coordinator.log("EmptySet.prototype.runTest - subscribe empty set"); /** * Subscribe the empty set (we have not added anything). */ @@ -34,6 +37,7 @@ EmptySet.prototype.runTest = function() { } + this.coordinator.log("EmptySet.prototype.runTest - create second set"); /** * Create a second set */ @@ -45,6 +49,7 @@ EmptySet.prototype.runTest = function() { this.coordinator.join(slonik); this.testResults.assertCheck('second set created okay',slonik.getReturnCode(),0); + this.coordinator.log("EmptySet.prototype.runTest - merge second set before subscription"); /** * Try merging the set. * This SHOULD fail. set 1 and 2 have different subscribers. 
@@ -55,6 +60,7 @@ EmptySet.prototype.runTest = function() { this.coordinator.join(slonik); this.testResults.assertCheck('merging unsubscribed set caused an error',slonik.getReturnCode()!=0,true); + this.coordinator.log("EmptySet.prototype.runTest - subscribe second set"); /** * Subscribe the set(remember it is empty), then merge. */ @@ -79,12 +85,12 @@ EmptySet.prototype.runTest = function() { slonik.run(); this.coordinator.join(slonik); + this.coordinator.log("EmptySet.prototype.runTest - merging second set"); this.testResults.assertCheck('merging empty set',slonik.getReturnCode(),0); - this.coordinator.log('Merging test finished'); - + this.coordinator.log("EmptySet.prototype.runTest - test complete"); for(var idx=1; idx <= this.getNodeCount(); idx++) { slonArray[idx-1].stop(); diff --git a/clustertest/disorder/tests/ExecuteScript.js b/clustertest/disorder/tests/ExecuteScript.js index cd034c86..d877f609 100644 --- a/clustertest/disorder/tests/ExecuteScript.js +++ b/clustertest/disorder/tests/ExecuteScript.js @@ -16,24 +16,28 @@ ExecuteScript.prototype.constructor = ExecuteScript; ExecuteScript.prototype.runTest = function() { + this.coordinator.log("ExecuteScript.prototype.runTest - begin"); this.testResults.newGroup("Execute Script"); this.setupReplication(); /** * Start the slons. */ + this.coordinator.log("ExecuteScript.prototype.runTest - start slons"); var slonArray = []; for ( var idx = 1; idx <= this.getNodeCount(); idx++) { slonArray[idx - 1] = this.coordinator.createSlonLauncher('db' + idx); slonArray[idx - 1].run(); } + this.coordinator.log("ExecuteScript.prototype.runTest - add tables"); /** * Add some tables to replication. * */ this.addTables(); + this.coordinator.log("ExecuteScript.prototype.runTest - subscribe first set"); /** * Subscribe the first node. 
*/ @@ -42,6 +46,7 @@ ExecuteScript.prototype.runTest = function() { this.testAddDropColumn(1, 1, false); + this.coordinator.log("ExecuteScript.prototype.runTest - create second set"); /** * Now create a second replication set. * @@ -58,6 +63,7 @@ */ this.subscribeSet(2, 2,3, [ 4, 5 ]); + this.coordinator.log("ExecuteScript.prototype.runTest - move set to node 1"); /** * Move the set to node 1. We want to do this for the next test. */ @@ -105,7 +111,7 @@ ExecuteScript.prototype.runTest = function() { * Now we generate READ only loads on all nodes. * We then start doing execute scripts. */ - this.coordinator.log('starting execute script with read queries on all nodes'); + this.coordinator.log("ExecuteScript.prototype.runTest - starting execute script with read queries on all nodes"); var readLoad=[]; for(var idx=0; idx < this.getNodeCount(); idx++) { readLoad[idx] = this.startDataChecks(idx+1); @@ -133,6 +139,7 @@ ExecuteScript.prototype.runTest = function() { slonArray[idx - 1].stop(); this.coordinator.join(slonArray[idx - 1]); } + this.coordinator.log("ExecuteScript.prototype.runTest - complete"); } @@ -160,12 +167,13 @@ ExecuteScript.prototype.validateDdl = function(dbname) { } ExecuteScript.prototype.testAddDropColumn = function(setid, eventNode, expectFailure) { - this.coordinator.log('testAddDropColumn'); + this.coordinator.log("ExecuteScript.prototype.testAddDropColumn - begin"); /** * start up some load. */ var load = this.generateLoad(); + this.coordinator.log("ExecuteScript.prototype.testAddDropColumn - add column to orders"); /** * Now add a column to orders. We will do this via EXECUTE SCRIPT.
*/ @@ -214,14 +222,18 @@ ExecuteScript.prototype.testAddDropColumn = function(setid, eventNode, return; } + this.coordinator.log("ExecuteScript.prototype.testAddDropColumn - terminate load"); load.stop(); this.coordinator.join(load); + this.coordinator.log("ExecuteScript.prototype.testAddDropColumn - synchronize"); this.slonikSync(1, 1); + this.coordinator.log("ExecuteScript.prototype.testAddDropColumn - compare databases"); this.compareDb('db1', 'db2'); this.compareDb('db1', 'db3'); this.compareDb('db1', 'db4'); + this.coordinator.log("ExecuteScript.prototype.testAddDropColumn - execute ONLY ON"); // Now execute ONLY ON. // We DROP the column, on the master first then moving out. load = this.generateLoad(); @@ -256,11 +268,11 @@ ExecuteScript.prototype.testAddDropColumn = function(setid, eventNode, this.compareDb('db1', 'db3'); this.compareDb('db1', 'db4'); - - + this.coordinator.log("ExecuteScript.prototype.testAddDropColumn - complete"); } ExecuteScript.prototype.createAndReplicateTestTable=function() { + this.coordinator.log("ExecuteScript.prototype.createAndReplicateTestTable - begin"); var scriptFile = java.io.File.createTempFile('executeScript', '.sql'); scriptFile.deleteOnExit(); fileWriter = new java.io.FileWriter(scriptFile); @@ -289,9 +301,11 @@ ExecuteScript.prototype.createAndReplicateTestTable=function() { // Explicitly leave node 2 out of this. 
this.subscribeSet(3,1, 1, [ 3 ]); this.subscribeSet(3,1, 3, [ 4, 5 ]); + this.coordinator.log("ExecuteScript.prototype.createAndReplicateTestTable - complete"); } ExecuteScript.prototype.createAddTestTable = function() { + this.coordinator.log("ExecuteScript.prototype.createAddTestTable - begin"); this.coordinator.log('createAddTestTable'); this.createAndReplicateTestTable(); @@ -323,7 +337,7 @@ ExecuteScript.prototype.createAddTestTable = function() { //this.coordinator.join(slonik); //this.testResults.assertCheck('drop removed table worked okay',slonik.getReturnCode(),0); this.moveSet(3, 1, 3); - + this.coordinator.log("ExecuteScript.prototype.createAddTestTable - complete"); } /** @@ -339,18 +353,18 @@ ExecuteScript.prototype.createAddTestTable = function() { * */ ExecuteScript.prototype.testDDLFailure = function() { - this.coordinator.log("entering testDDL failure"); + this.coordinator.log("ExecuteScript.prototype.testDDLFailure - begin"); var scriptFile = java.io.File.createTempFile('executeScript', '.sql'); scriptFile.deleteOnExit(); fileWriter = new java.io.FileWriter(scriptFile); - fileWriter - .write('CREATE TABLE disorder.test_transient(id serial,data text, primary key(id));'); + fileWriter.write('CREATE TABLE disorder.test_transient(id serial,data text, primary key(id));'); fileWriter.close(); var connection = this.coordinator.createJdbcConnection('db2'); var statement = connection.createStatement(); statement.execute('CREATE TABLE disorder.test_transient(id int4, data text,primary key(id));'); + this.coordinator.log("ExecuteScript.prototype.testDDLFailure - failure expected on node 2"); /** * First execute the script against node 2, it should fail. 
*/ @@ -362,6 +376,8 @@ ExecuteScript.prototype.runTest = function() { slonik.run(); this.coordinator.join(slonik); this.testResults.assertCheck('ddl failed as expected',slonik.getReturnCode(),255); + + this.coordinator.log("ExecuteScript.prototype.testDDLFailure - success expected on event node"); /** * Now try it against an event node that it should succeed. */ @@ -372,6 +388,7 @@ slonik.run(); this.coordinator.join(slonik); this.testResults.assertCheck('ddl accepted on node 1',slonik.getReturnCode(),0); + this.coordinator.log("ExecuteScript.prototype.testDDLFailure - paused replication to node 2"); /** * Replication to node 2 should now be 'paused', */ @@ -379,6 +396,8 @@ var lag = this.measureLag(1,2); this.coordinator.log('we expect lag, we measure it as ' + lag); this.testResults.assertCheck('node is lagged', lag >= 10,true); + + this.coordinator.log("ExecuteScript.prototype.testDDLFailure - lag analysis"); /** * Allow the DDL to work on node 2.
*/ @@ -392,7 +411,7 @@ ExecuteScript.prototype.testDDLFailure = function() { statement.close(); connection.close(); - + this.coordinator.log("ExecuteScript.prototype.testDDLFailure - complete"); } /** @@ -403,7 +422,7 @@ ExecuteScript.prototype.testDDLFailure = function() { * */ ExecuteScript.prototype.dropTestTable=function(node_id,set_id,removeFromReplication) { - this.coordinator.log('dropTestTable ' + node_id + ',' + set_id); + this.coordinator.log('ExecuteScript.prototype.dropTestTable ' + node_id + ',' + set_id); var slonikPreamble = this.getSlonikPreamble(); var scriptFile_drop = java.io.File.createTempFile('executeScript', '.sql'); var fileWriter = new java.io.FileWriter(scriptFile_drop); @@ -424,8 +443,10 @@ ExecuteScript.prototype.dropTestTable=function(node_id,set_id,removeFromReplicat this.testResults.assertCheck('slonik executed drop table okay', slonik .getReturnCode(), 0); + this.coordinator.log('ExecuteScript.prototype.dropTestTable ' + node_id + ',' + set_id+ " complete"); } ExecuteScript.prototype.dropSet3 = function(set_origin) { + this.coordinator.log('ExecuteScript.prototype.dropSet3 ' + set_origin + " - begin"); var slonikPreamble = this.getSlonikPreamble(); var slonikScript ='echo \'ExecuteScript.prototype.dropSet3\';\n'; slonikScript =+ 'drop set (id=3,origin=' + set_origin + ');' @@ -434,6 +455,7 @@ ExecuteScript.prototype.dropSet3 = function(set_origin) { slonik.run(); this.coordinator.join(slonik); this.testResults.assertCheck('drop set 3 worked as expected',slonik.getReturnCode(),0); + this.coordinator.log('ExecuteScript.prototype.dropSet3 ' + set_origin + " - complete"); } @@ -444,7 +466,7 @@ ExecuteScript.prototype.dropSet3 = function(set_origin) { * */ ExecuteScript.prototype.performInsert=function(node_id) { - this.coordinator.log("performInsert on node " + node_id); + this.coordinator.log("ExecuteScript.prototype.performInsert on node " + node_id + " - begin"); var slonikPreamble = this.getSlonikPreamble(); var scriptFile = 
java.io.File.createTempFile('executeScript', '.sql'); var fileWriter = new java.io.FileWriter(scriptFile); @@ -461,6 +483,7 @@ ExecuteScript.prototype.performInsert=function(node_id) { // Generate some WRITE load. // We want the EXECUTE script to go in sequence. + this.coordinator.log("ExecuteScript.prototype.performInsert on node " + node_id + " - imposing load"); var disorderClientJs = this.coordinator.readFile('disorder/client/disorder.js'); disorderClientJs+= this.coordinator.readFile('disorder/client/run_fixed_load.js'); @@ -469,18 +492,19 @@ ExecuteScript.prototype.performInsert=function(node_id) { slonik.run(); + this.coordinator.log("ExecuteScript.prototype.performInsert on node " + node_id + " - performing drop table"); this.coordinator.join(slonik); this.testResults.assertCheck('slonik executed drop table okay', slonik .getReturnCode(), 0); load.stop(); this.coordinator.join(load); + this.coordinator.log("ExecuteScript.prototype.performInsert on node " + node_id + " - SYNC, compare DBs"); this.slonikSync(1,1); this.compareDb('db1','db2'); this.compareDb('db1','db3'); this.compareDb('db1','db4'); this.compareDb('db1','db5'); - - + this.coordinator.log("ExecuteScript.prototype.performInsert on node " + node_id + " - complete"); } diff --git a/clustertest/disorder/tests/FailNodeTest.js b/clustertest/disorder/tests/FailNodeTest.js index 8ff14352..6bd02331 100644 --- a/clustertest/disorder/tests/FailNodeTest.js +++ b/clustertest/disorder/tests/FailNodeTest.js @@ -27,13 +27,13 @@ FailNodeTest.prototype = new BasicTest(); FailNodeTest.prototype.constructor=FailNodeTest; FailNodeTest.prototype.runTest = function() { - - + this.coordinator.log("FailNodeTest.prototype.runTest - begin"); this.testResults.newGroup("Fail Node Test"); //this.prepareDb(['db1','db2']); //First setup slony + this.coordinator.log("FailNodeTest.prototype.runTest - configure replication"); this.setupReplication(); this.addCompletePaths(); @@ -42,22 +42,25 @@ FailNodeTest.prototype.runTest = 
function() { //Start the slons. //These must be started before slonik runs or the subscribe won't happen //thus slonik won't finish. + this.coordinator.log("FailNodeTest.prototype.runTest - start slons"); this.slonArray=[]; for(var idx=1; idx <= this.getNodeCount(); idx++) { this.slonArray[idx-1] = this.coordinator.createSlonLauncher('db' + idx); this.slonArray[idx-1].run(); } - this.coordinator.log('performing initial subscriptions'); + this.coordinator.log("FailNodeTest.prototype.runTest - subscribe sets - begin"); this.subscribeSet(1,1,1,[2,3]); this.subscribeSet(1,1,3,[4,5]); - this.coordinator.log('subscriptions complete'); + this.coordinator.log("FailNodeTest.prototype.runTest - subscribe sets - complete"); + this.coordinator.log("FailNodeTest.prototype.runTest - impose load"); var load = this.generateLoad(); load.run(); + this.coordinator.log("FailNodeTest.prototype.runTest - failing node 4"); this.slonikSync(1, 1); this.coordinator.log('failing node 4'); this.failNode(4,false); @@ -76,6 +79,7 @@ FailNodeTest.prototype.runTest = function() { //this.slonArray[3] = this.coordinator.createSlonLauncher('db4'); //this.slonArray[3].run(); //Readd the node. + this.coordinator.log("FailNodeTest.prototype.runTest - re-add node 4"); this.coordinator.log('adding node back'); this.reAddNode(4,1,3); @@ -99,14 +103,16 @@ FailNodeTest.prototype.runTest = function() { * Sleep a bit. * Do we need to do this for the paths to propogate???? 
*/ + this.coordinator.log("FailNodeTest.prototype.runTest - sleeping 60x1000"); java.lang.Thread.sleep(60*1000); - this.coordinator.log('restarting slons'); + this.coordinator.log("FailNodeTest.prototype.runTest - restart slons"); for(var idx=1; idx <= this.getNodeCount(); idx++) { this.slonArray[idx-1].stop(); this.coordinator.join(this.slonArray[idx-1]); this.slonArray[idx-1] = this.coordinator.createSlonLauncher('db' + idx); this.slonArray[idx-1].run(); } + this.coordinator.log("FailNodeTest.prototype.runTest - sleeping 60x1000"); java.lang.Thread.sleep(60*1000); /** * Replace the generateSlonikWait function with a version that @@ -130,6 +136,7 @@ FailNodeTest.prototype.runTest = function() { * SUBSCRIBE nodes 4,5 via node 1 directly. */ + this.coordinator.log("FailNodeTest.prototype.runTest - subscribe 4,5 via 1"); this.subscribeSet(1,1,1,[4,5]); @@ -137,18 +144,22 @@ FailNodeTest.prototype.runTest = function() { /** * Now we should be able to drop node 3. */ + this.coordinator.log("FailNodeTest.prototype.runTest - fail node 3"); this.failNode(3,false); this.generateSlonikWait=originalGenerateWait; load.stop(); + this.coordinator.log("FailNodeTest.prototype.runTest - sync"); this.slonikSync(1,1); + this.coordinator.log("FailNodeTest.prototype.runTest - compare db1,2,4,5"); // Run some comparisions this.compareDb('db1','db2'); this.compareDb('db1','db4'); this.compareDb('db1','db5'); //Start the load again. + this.coordinator.log("FailNodeTest.prototype.runTest - add load"); load = this.generateLoad(); load.run(); /** @@ -160,6 +171,7 @@ FailNodeTest.prototype.runTest = function() { /** * Generate 10 seconds of load. 
*/ + this.coordinator.log("FailNodeTest.prototype.runTest - sleep 10x1000"); java.lang.Thread.sleep(10*1000); load.stop(); this.coordinator.join(load); @@ -169,19 +181,24 @@ FailNodeTest.prototype.runTest = function() { this.slonikSync(1,4); this.slonikSync(1,5); + this.coordinator.log("FailNodeTest.prototype.runTest - subscribe 4,5"); this.subscribeSet(1,1,2,[4,5]); + this.coordinator.log("FailNodeTest.prototype.runTest - sync"); this.slonikSync(1,1); + this.coordinator.log("FailNodeTest.prototype.runTest - compare DBs"); this.compareDb('db1','db2'); this.compareDb('db1','db4'); this.compareDb('db1','db5'); + this.coordinator.log("FailNodeTest.prototype.runTest - generate load"); //More load. load = this.generateLoad(); load.run(); //Now kill the node 2 slon. this.slonArray[1].stop(); this.coordinator.join(this.slonArray[1]); + this.coordinator.log("FailNodeTest.prototype.runTest - drop DB2"); //Now DROP the database. This lets us simulate a hard failure. this.dropDb(['db2']); /** @@ -203,29 +220,29 @@ FailNodeTest.prototype.runTest = function() { return script; } + this.coordinator.log("FailNodeTest.prototype.runTest - reshape cluster"); //Now reshape the cluster. this.subscribeSet(1,1,1,[4,5]); + this.coordinator.log("FailNodeTest.prototype.runTest - drop node 2"); //Drop node 2. this.failNode(2, false); + this.coordinator.log("FailNodeTest.prototype.runTest - sleep 10x1000"); java.lang.Thread.sleep(10*1000); load.stop(); this.coordinator.join(load); this.slonikSync(1,1); - this.coordinator.log('Stopping slons'); + this.coordinator.log("FailNodeTest.prototype.runTest - terminate slons"); for(var idx=0; idx < this.slonArray.length; idx++) { this.slonArray[idx].stop(); this.coordinator.join(this.slonArray[idx]); } - - - - + this.coordinator.log("FailNodeTest.prototype.runTest - complete"); } /** @@ -237,6 +254,7 @@ FailNodeTest.prototype.runTest = function() { * 2. executing DROP NODE. 
*/ FailNodeTest.prototype.failNode=function(nodeId, expectFailure) { + this.coordinator.log("FailNodeTest.prototype.failNode - begin"); this.slonArray[nodeId-1].stop(); this.coordinator.join(this.slonArray[nodeId-1]); var slonikPreamble = this.getSlonikPreamble(); @@ -258,7 +276,7 @@ FailNodeTest.prototype.failNode=function(nodeId, expectFailure) { else { this.testResults.assertCheck('drop node okay',slonik.getReturnCode(),0); } - + this.coordinator.log("FailNodeTest.prototype.failNode - complete"); } FailNodeTest.prototype.checkNodeNotExists=function(check_node,nodeid) { @@ -280,7 +298,7 @@ FailNodeTest.prototype.checkNodeNotExists=function(check_node,nodeid) { } FailNodeTest.prototype.reAddNode = function(node_id,origin,provider) { - this.coordinator.log('reAddNode(' + node_id + ',' + provider + ')'); + this.coordinator.log("FailNodeTest.prototype.reAddNode(" + node_id + ',' + provider + ') - begin'); var slonikPreamble = this.getSlonikPreamble(); var slonikScript = 'echo \'FailNodeTest.prototype.reAddNode\';\n'; slonikScript += 'try {\n' @@ -329,10 +347,8 @@ FailNodeTest.prototype.reAddNode = function(node_id,origin,provider) { this.coordinator.join(this.slonArray[provider-1]); this.slonArray[provider-1] = this.coordinator.createSlonLauncher('db' + provider); this.slonArray[provider-1].run(); - - this.subscribeSet(1, origin,provider,[node_id]); - + this.coordinator.log("FailNodeTest.prototype.reAddNode(" + node_id + ',' + provider + ') - complete'); } diff --git a/clustertest/disorder/tests/Failover.js b/clustertest/disorder/tests/Failover.js index 9376a6e7..d3e8dab5 100644 --- a/clustertest/disorder/tests/Failover.js +++ b/clustertest/disorder/tests/Failover.js @@ -18,7 +18,7 @@ Failover.prototype = new FailNodeTest(); Failover.prototype.constructor = Failover; Failover.prototype.runTest = function() { - + this.coordinator.log("Failover.prototype.runTest - begin"); this.testResults.newGroup("Fail Over Test"); this.setupReplication();
this.addCompletePaths(); @@ -322,10 +322,11 @@ Failover.prototype.addCompletePaths = function() { slonik.run(); this.coordinator.join(slonik); this.testResults.assertCheck('paths added okay', slonik.getReturnCode(), 0); - + this.coordinator.log("Failover.prototype.addCompletePaths - complete"); } Failover.prototype.dropNode=function(node_id,event_node) { + this.coordinator.log("Failover.prototype.dropNode - begin"); var slonikPreamble = this.getSlonikPreamble(); var slonikScript = 'echo \'Failover.prototype.dropNode\';\n'; slonikScript += 'DROP NODE(id=' + node_id + ',event node=' + event_node +');\n'; @@ -339,7 +340,5 @@ Failover.prototype.dropNode=function(node_id,event_node) { slonik.run(); this.coordinator.join(slonik); this.testResults.assertCheck('slonik drop node status okay',slonik.getReturnCode(),0); - - - + this.coordinator.log("Failover.prototype.dropNode - complete"); } diff --git a/clustertest/disorder/tests/HeavyLoadTest.js b/clustertest/disorder/tests/HeavyLoadTest.js index c5f49651..b8ddd4dd 100644 --- a/clustertest/disorder/tests/HeavyLoadTest.js +++ b/clustertest/disorder/tests/HeavyLoadTest.js @@ -25,10 +25,12 @@ HeavyLoadTest.prototype.constructor = HeavyLoadTest; HeavyLoadTest.prototype.runTest = function() { + this.coordinator.log("HeavyLoadTest.prototype.runTest - begin"); this.testResults.newGroup("Heavy load"); this.setupReplication(); + this.coordinator.log("HeavyLoadTest.prototype.runTest - start slons"); /** * Start the slons. */ @@ -48,12 +50,14 @@ HeavyLoadTest.prototype.runTest = function() { slonArray[idx-1].run(); } + this.coordinator.log("HeavyLoadTest.prototype.runTest - add tables"); /** * Add some tables to replication.
* */ this.addTables(); + this.coordinator.log("HeavyLoadTest.prototype.runTest - subscribe nodes 2,3"); /** * Subscribe node 2,3 */ @@ -61,6 +65,7 @@ HeavyLoadTest.prototype.runTest = function() { java.lang.Thread.sleep(10*1000); this.subscribeSet(1,1,3,[4,5]); + this.coordinator.log("HeavyLoadTest.prototype.runTest - add load"); //Generate some load. var populate=this.generateLoad(); this.prepareDb(['db6']); @@ -74,6 +79,7 @@ HeavyLoadTest.prototype.runTest = function() { //Drop the DB, //we want the dump to be restored into a clean state. + this.coordinator.log("HeavyLoadTest.prototype.runTest - load db6 from dump"); var loadInitial = this.coordinator.createPsqlCommand('db6',dumpFile); loadInitial.run(); this.coordinator.join(loadInitial); @@ -81,11 +87,12 @@ HeavyLoadTest.prototype.runTest = function() { loadInitial.getReturnCode(),0); + this.coordinator.log("HeavyLoadTest.prototype.runTest - turn on log shipping daemon"); //Invoke log shipping daemon. var logShippingDaemon = this.coordinator.createLogShippingDaemon('db6',this.logdirectoryFile); logShippingDaemon.run(); - this.coordinator.log('replicating at full tile for 20 minutes'); + this.coordinator.log("HeavyLoadTest.prototype.runTest - full tilt replication load test for 20 minutes"); var lastCount=0; for(var minute=0; minute <=20;minute++) { java.lang.Thread.sleep(60*1000); @@ -100,26 +107,30 @@ HeavyLoadTest.prototype.runTest = function() { stat.close(); con.close(); } + this.coordinator.log("HeavyLoadTest.prototype.runTest - full tilt replication load test complete"); populate.stop(); this.coordinator.join(populate); + this.coordinator.log("HeavyLoadTest.prototype.runTest - sync"); this.slonikSync(1, 1); + this.coordinator.log("HeavyLoadTest.prototype.runTest - compare databases"); this.compareDb('db1','db3'); this.compareDb('db1','db4'); this.compareDb('db2','db1'); this.compareDb('db4','db5'); - this.coordinator.log("Shutting down slons"); + this.coordinator.log("HeavyLoadTest.prototype.runTest - 
shut down slons"); for(var idx=1; idx <= this.getNodeCount(); idx++) { slonArray[idx-1].stop(); this.coordinator.join(slonArray[idx-1]); } - this.coordinator.log("waiting to give time for log shipping to catch up"); + this.coordinator.log("HeavyLoadTest.prototype.runTest - logshipping catchup - 30s"); java.lang.Thread.sleep(30*1000); logShippingDaemon.stop(); this.coordinator.join(logShippingDaemon); this.compareDb('db4','db6'); this.dropDb(['db6']); + this.coordinator.log("HeavyLoadTest.prototype.runTest - complete"); } diff --git a/clustertest/disorder/tests/InitialCopyFail.js b/clustertest/disorder/tests/InitialCopyFail.js index 3da0e483..98ba63a7 100644 --- a/clustertest/disorder/tests/InitialCopyFail.js +++ b/clustertest/disorder/tests/InitialCopyFail.js @@ -23,6 +23,7 @@ InitialCopyFail.prototype.getNodeCount=function() { InitialCopyFail.prototype.runTest = function() { + this.coordinator.log("InitialCopyFail.prototype.runTest - begin"); this.testResults.newGroup("Initial Copy Fails"); this.setupReplication(); this.addTables(); @@ -84,6 +85,7 @@ InitialCopyFail.prototype.runTest = function() { + this.coordinator.log("InitialCopyFail.prototype.runTest - start slons"); /** * Start the slons. */ @@ -108,6 +110,7 @@ InitialCopyFail.prototype.runTest = function() { */ var slonikPreamble = this.getSlonikPreamble(); + this.coordinator.log("InitialCopyFail.prototype.runTest - subscribe sets"); for(var idx=2; idx <= this.getNodeCount(); idx++) { var slonikScript = 'echo \'InitialCopyFail.prototype.runTest\';\n'; @@ -126,8 +129,10 @@ InitialCopyFail.prototype.runTest = function() { + this.coordinator.log("InitialCopyFail.prototype.runTest - sync"); //This sync should work.
this.slonikSync(1,1); + this.coordinator.log("InitialCopyFail.prototype.runTest - compare db1,2,3"); this.compareDb('db1','db2'); this.compareDb('db1','db3'); @@ -156,5 +161,5 @@ InitialCopyFail.prototype.runTest = function() { } } - + this.coordinator.log("InitialCopyFail.prototype.runTest - complete"); } diff --git a/clustertest/disorder/tests/LogShipping.js b/clustertest/disorder/tests/LogShipping.js index 502d64b3..9fa6b8db 100644 --- a/clustertest/disorder/tests/LogShipping.js +++ b/clustertest/disorder/tests/LogShipping.js @@ -23,8 +23,8 @@ LogShipping.prototype.getNodeCount=function() { return 4; } -LogShipping.prototype.runTest = function() { - +LogShipping.prototype.runTest = function() { + this.coordinator.log("LogShipping.prototype.runTest - begin"); this.testResults.newGroup("Log Shipping"); this.setupReplication(); @@ -53,6 +53,7 @@ LogShipping.prototype.runTest = function() { */ this.addTables(); + this.coordinator.log("LogShipping.prototype.runTest - set up subscription"); /** * Subscribe node 3 */ @@ -60,15 +61,18 @@ LogShipping.prototype.runTest = function() { java.lang.Thread.sleep(10*1000); this.subscribeSet(1,1,3,[4]); + this.coordinator.log("LogShipping.prototype.runTest - generate load"); //Generate some load. var populate=this.generateLoad(); + this.coordinator.log("LogShipping.prototype.runTest - dump for log shipping"); var dumpFile = java.io.File.createTempFile('slon_logshipping','.sql'); //dumpFile.deleteOnExit(); var dumpProcess = this.coordinator.createLogShippingDump('db4',dumpFile); dumpProcess.run(); this.coordinator.join(dumpProcess); + this.coordinator.log("LogShipping.prototype.runTest - drop DB"); //Drop the DB, //we want the dump to be restored into a clean state. this.prepareDb(['db6']); @@ -79,6 +83,7 @@ LogShipping.prototype.runTest = function() { loadInitial.getReturnCode(),0); + this.coordinator.log("LogShipping.prototype.runTest - start log shipping daemon"); //Invoke log shipping daemon. 
var logShippingDaemon = this.coordinator.createLogShippingDaemon('db6',this.logdirectoryFile); logShippingDaemon.run(); @@ -91,6 +96,7 @@ LogShipping.prototype.runTest = function() { this.compareDb('db1','db4'); + this.coordinator.log("LogShipping.prototype.runTest - shut down slons"); this.coordinator.log("Shutting down slons"); for(var idx=1; idx <= this.getNodeCount(); idx++) { slonArray[idx-1].stop(); @@ -100,9 +106,10 @@ LogShipping.prototype.runTest = function() { java.lang.Thread.sleep(30*1000); logShippingDaemon.stop(); this.coordinator.join(logShippingDaemon); + this.coordinator.log("LogShipping.prototype.runTest - compare db4,6"); this.compareDb('db4','db6'); this.dropDb(['db6']); - + this.coordinator.log("LogShipping.prototype.runTest - complete"); } diff --git a/clustertest/disorder/tests/LongTransaction.js b/clustertest/disorder/tests/LongTransaction.js index af58ea3c..fb519392 100644 --- a/clustertest/disorder/tests/LongTransaction.js +++ b/clustertest/disorder/tests/LongTransaction.js @@ -16,7 +16,7 @@ LongTransaction.prototype.getNodeCount = function() { } LongTransaction.prototype.runTest = function() { - + this.coordinator.log("LongTransaction.prototype.runTest - begin"); this.testResults.newGroup("Long Transaction"); //this.prepareDb(['db1','db2']); @@ -28,6 +28,7 @@ LongTransaction.prototype.runTest = function() { this.addCompletePaths(); + this.coordinator.log("LongTransaction.prototype.runTest - start long transaction"); /** * Start a transaction. * Start the add table process. @@ -49,6 +50,7 @@ LongTransaction.prototype.runTest = function() { this.testResults.assertCheck('add tables completed after transaction finished',slonik.getReturnCode(),0); + this.coordinator.log("LongTransaction.prototype.runTest - start slons"); //Start the slons. //These must be started before slonik runs or the subscribe won't happen //thus slonik won't finish. 
@@ -59,12 +61,14 @@ LongTransaction.prototype.runTest = function() { } + this.coordinator.log("LongTransaction.prototype.runTest - subscribe sets in background"); var subs=this.subscribeSetBackground(1,1,1,[2,3]); for(var idx=0; idx < subs.length; idx++) { subs[idx].run(); } //A transaction should not block the subscription. //make sure this is the case. + this.coordinator.log("LongTransaction.prototype.runTest - sleep 3x60x1000"); java.lang.Thread.sleep(3*60*1000); for(var idx=0; idx < subs.length; idx++) { this.testResults.assertCheck('subscription blocking on the transaction', subs[idx].isFinished(),true); @@ -74,8 +78,10 @@ LongTransaction.prototype.runTest = function() { txnConnection.close(); + this.coordinator.log("LongTransaction.prototype.runTest - subscribe 4,5"); this.subscribeSet(1,1,3,[4,5]); + this.coordinator.log("LongTransaction.prototype.runTest - subscriptions complete"); this.coordinator.log('subscriptions complete'); @@ -88,6 +94,7 @@ LongTransaction.prototype.runTest = function() { load.stop(); this.slonikSync(1,1); this.coordinator.join(load); + this.coordinator.log("LongTransaction.prototype.runTest - compare db1,2,4"); this.compareDb('db1','db2'); this.compareDb('db1','db4'); @@ -107,6 +114,7 @@ LongTransaction.prototype.startTransaction=function() { rs.close(); stat.close(); return dbCon; + this.coordinator.log("LongTransaction.prototype.runTest - begin"); } LongTransaction.prototype.countOrders=function(db) { diff --git a/clustertest/disorder/tests/MoveSet.js b/clustertest/disorder/tests/MoveSet.js index 62da922e..187a2e6a 100644 --- a/clustertest/disorder/tests/MoveSet.js +++ b/clustertest/disorder/tests/MoveSet.js @@ -22,18 +22,19 @@ MoveSet.prototype.getNodeCount = function() { } MoveSet.prototype.runTest = function() { - + this.coordinator.log("MoveSet.prototype.runTest - begin"); this.testResults.newGroup("move set1"); //this.prepareDb(['db1','db2']); //First setup slony - + this.coordinator.log("MoveSet.prototype.runTest - set up 
replication"); this.setupReplication(); this.addCompletePaths(); this.addTables(); + this.coordinator.log("MoveSet.prototype.runTest - start slons"); //Start the slons. //These must be started before slonik runs or the subscribe won't happen //thus slonik won't finish. @@ -47,13 +48,13 @@ MoveSet.prototype.runTest = function() { this.subscribeSet(1,1,1,[2,3]); this.subscribeSet(1,1,3,[4,5]); - this.coordinator.log('subscriptions complete'); + this.coordinator.log("MoveSet.prototype.runTest - subscriptions complete"); this.syncWaitTime = 60*5; this.slonikSync("1","1"); this.syncWaitTime=60; - this.coordinator.log('sets are subscribed and data is synced'); + this.coordinator.log("MoveSet.prototype.runTest - sets subscribed, data synced"); var pairings=[ [1,2] ,[2,3] @@ -61,15 +62,15 @@ MoveSet.prototype.runTest = function() { ,[1,3] ,[3,4] ,[4,5] - ,[5,2] - ,[2,4] + ,[5,2] + ,[2,4] ,[4,1] ,[1,5] ]; for(var idx = 0; idx < pairings.length; idx++) { var curMoveNodes=pairings[idx]; - this.coordinator.log('moving set from ' + curMoveNodes[0] + ' to ' + + this.coordinator.log("MoveSet.prototype.runTest - moving set from " + curMoveNodes[0] + ' to ' + curMoveNodes[1]); var moveResult=this.moveSet(1,curMoveNodes[0],curMoveNodes[1]) @@ -82,15 +83,14 @@ MoveSet.prototype.runTest = function() { //Make sure that db1 is read only. 
this.verifyReadOnly(curMoveNodes[0]); - this.coordinator.log('verification done'); + this.coordinator.log("MoveSet.prototype.runTest - verification done"); load.stop(); - this.coordinator.log('joining after load'); + this.coordinator.log("MoveSet.prototype.runTest - joining after load"); this.coordinator.join(load); - this.coordinator.log('syncing after load'); + this.coordinator.log("MoveSet.prototype.runTest - syncing after load"); this.slonikSync(1,curMoveNodes[1]); - this.coordinator.log('sync done'); - if(moveResult==0) { - + this.coordinator.log("MoveSet.prototype.runTest - syncing complete"); + if(moveResult==0) { this.compareDb('db' + curMoveNodes[0],'db' + curMoveNodes[1]); } } @@ -99,6 +99,7 @@ MoveSet.prototype.runTest = function() { slonArray[idx-1].stop(); this.coordinator.join(slonArray[idx-1]); } + this.coordinator.log("MoveSet.prototype.runTest - complete"); } MoveSet.prototype.getSyncWaitTime = function () { diff --git a/clustertest/disorder/tests/MultipleOrigins.js b/clustertest/disorder/tests/MultipleOrigins.js index 67023d4b..8860d8b0 100644 --- a/clustertest/disorder/tests/MultipleOrigins.js +++ b/clustertest/disorder/tests/MultipleOrigins.js @@ -21,19 +21,22 @@ MultipleOrigins.prototype = new Failover(); MultipleOrigins.prototype.constructor = MultipleOrigins; MultipleOrigins.prototype.runTest = function() { - + this.coordinator.log("MultipleOrigins.prototype.runTest - begin"); this.testResults.newGroup("Multiple Origins"); this.setupReplication(); this.addTables(); + this.coordinator.log("MultipleOrigins.prototype.runTest - configuration configured"); /** * Start the slons. 
*/ + this.coordinator.log("MultipleOrigins.prototype.runTest - start slons"); for(var idx=1; idx <= this.getNodeCount(); idx++) { this.slonArray[idx-1] = this.coordinator.createSlonLauncher('db' + idx); this.slonArray[idx-1].run(); } + this.coordinator.log("MultipleOrigins.prototype.runTest - subscribe empty set"); /** * Subscribe the empty set (we have not added anything). */ @@ -44,11 +47,13 @@ MultipleOrigins.prototype.runTest = function() { this.subscribeSet(1,1,3,[4,5]); } + this.coordinator.log("MultipleOrigins.prototype.runTest - subscribe empty set 2"); this.addCompletePaths(); this.createSecondSet(2); this.subscribeSet(2,2,'2','4'); + this.coordinator.log("MultipleOrigins.prototype.runTest - generate load"); /** * * 1 2 @@ -67,6 +72,7 @@ MultipleOrigins.prototype.runTest = function() { this.compareDb('db1','db3'); this.compareDb('db1','db4'); + this.coordinator.log("MultipleOrigins.prototype.runTest - move set 1-->3"); /** * MOVE SET 1===>3 */ @@ -76,6 +82,7 @@ MultipleOrigins.prototype.runTest = function() { java.lang.Thread.sleep(10*1000); load.stop(); this.coordinator.join(load); + this.coordinator.log("MultipleOrigins.prototype.runTest - sync, compare"); this.slonikSync(1,4); this.compareDb('db1','db3'); @@ -87,4 +94,5 @@ MultipleOrigins.prototype.runTest = function() { this.slonArray[idx-1].stop(); this.coordinator.join(this.slonArray[idx-1]); } + this.coordinator.log("MultipleOrigins.prototype.runTest - complete"); } diff --git a/clustertest/disorder/tests/OmitCopy.js b/clustertest/disorder/tests/OmitCopy.js index e0b5d77a..baec5e46 100644 --- a/clustertest/disorder/tests/OmitCopy.js +++ b/clustertest/disorder/tests/OmitCopy.js @@ -25,6 +25,7 @@ OmitCopy.prototype = new BasicTest(); OmitCopy.prototype.constructor = OmitCopy; OmitCopy.prototype.runTest = function() { + this.coordinator.log("OmitCopy.prototype.runTest - begin"); this.testResults.newGroup("Omit Copy"); this.setupReplication(); @@ -112,6 +113,7 @@ OmitCopy.prototype.runTest = function() 
{ slonArray[idx-1].stop(); this.coordinator.join(slonArray[idx-1]); } + this.coordinator.log("OmitCopy.prototype.runTest - complete"); } OmitCopy.prototype.subscribeOmitCopy=function(origin,provider,subscriberNodeId,outFile) { diff --git a/clustertest/disorder/tests/RenameTests.js b/clustertest/disorder/tests/RenameTests.js index 6d547f84..913fbb9f 100644 --- a/clustertest/disorder/tests/RenameTests.js +++ b/clustertest/disorder/tests/RenameTests.js @@ -14,6 +14,7 @@ RenameTests.prototype = new ExecuteScript(); RenameTests.prototype.constructor = RenameTests; RenameTests.prototype.runTest = function() { + this.coordinator.log("RenameTests.prototype.runTest - begin"); this.testResults.newGroup("Rename tables"); this.setupReplication(); @@ -106,6 +107,7 @@ RenameTests.prototype.runTest = function() { this.coordinator.join(slonArray[idx - 1]); } + this.coordinator.log("RenameTests.prototype.runTest - complete"); } diff --git a/clustertest/disorder/tests/RestartTest.js b/clustertest/disorder/tests/RestartTest.js index 1924e837..5565a9dc 100644 --- a/clustertest/disorder/tests/RestartTest.js +++ b/clustertest/disorder/tests/RestartTest.js @@ -22,6 +22,7 @@ RestartTest.prototype = new BasicTest(); RestartTest.prototype.constructor = RestartTest; RestartTest.prototype.runTest = function() { + this.coordinator.log("RestartTest.prototype.runTest - begin"); this.testResults.newGroup("Restart test"); this.setupReplication(); @@ -66,8 +67,7 @@ RestartTest.prototype.runTest = function() { } - - + this.coordinator.log("RestartTest.prototype.runTest - complete"); } RestartTest.prototype.notifyRestart=function(node_id) { diff --git a/clustertest/disorder/tests/SlonKilling.js b/clustertest/disorder/tests/SlonKilling.js index 46189ff7..96a6a4be 100644 --- a/clustertest/disorder/tests/SlonKilling.js +++ b/clustertest/disorder/tests/SlonKilling.js @@ -16,6 +16,7 @@ SlonKilling.prototype = new BasicTest(); SlonKilling.prototype.constructor = SlonKilling; SlonKilling.prototype.runTest = 
function() { + this.coordinator.log("SlonKilling.prototype.runTest - begin"); this.testResults.newGroup("Slon killing"); for(var idx=0; idx < 10; idx++) { @@ -23,27 +24,31 @@ SlonKilling.prototype.runTest = function() { this.testActions(); this.teardownSlony(); } + this.coordinator.log("SlonKilling.prototype.runTest - compare db1,2,3,4,5"); this.compareDb('db1','db2'); this.compareDb('db1','db3'); this.compareDb('db1','db4'); this.compareDb('db1','db5'); - - + this.coordinator.log("SlonKilling.prototype.runTest - complete"); } + SlonKilling.prototype.testActions=function() { + this.coordinator.log("SlonKilling.prototype.testActions - begin"); this.setupReplication(); this.addTables(); /** * Start the slons. */ + this.coordinator.log("SlonKilling.prototype.testActions - start slons"); var slonArray=[]; for(var idx=1; idx <= this.getNodeCount(); idx++) { slonArray[idx-1] = this.coordinator.createSlonLauncher('db' + idx); slonArray[idx-1].run(); } + this.coordinator.log("SlonKilling.prototype.testActions - subscribe sets"); //Now subscribe the sets in the background this.addCompletePaths(); var slonikList = this.subscribeSetBackground(1,1,1,[2,3,4,5]); @@ -53,7 +58,7 @@ SlonKilling.prototype.testActions=function() { } var random = new java.util.Random(); var sleepTime = random.nextInt(60); - this.coordinator.log('sleeping for ' + sleepTime + ' seconds before killing a slon'); + this.coordinator.log("SlonKilling.prototype.testActions - sleeping for " + sleepTime + ' seconds before killing a slon'); java.lang.Thread.sleep(sleepTime); var slonToKill = random.nextInt(slonArray.length-1); slonArray[slonToKill].stop(); @@ -68,6 +73,7 @@ SlonKilling.prototype.testActions=function() { } load.stop(); this.coordinator.join(load); + this.coordinator.log("SlonKilling.prototype.testActions - sync"); this.slonikSync(1,1,60*5); for(var idx=1; idx <= this.getNodeCount(); idx++) { @@ -75,5 +81,5 @@ SlonKilling.prototype.testActions=function() { this.coordinator.join(slonArray[idx-1]); 
} - + this.coordinator.log("SlonKilling.prototype.testActions - complete"); } diff --git a/clustertest/disorder/tests/SubscribeUnderLoad.js b/clustertest/disorder/tests/SubscribeUnderLoad.js index 2fca4203..f04a0e25 100644 --- a/clustertest/disorder/tests/SubscribeUnderLoad.js +++ b/clustertest/disorder/tests/SubscribeUnderLoad.js @@ -16,7 +16,7 @@ SubscribeUnderLoad.prototype = new BasicTest(); SubscribeUnderLoad.prototype.constructor = SubscribeUnderLoad; SubscribeUnderLoad.prototype.runTest = function() { - + this.coordinator.log("SubscribeUnderLoad.prototype.testActions - begin"); this.testResults.newGroup("Subscribe Under Load"); this.setupReplication(); @@ -32,6 +32,7 @@ SubscribeUnderLoad.prototype.runTest = function() { //First generate a baseline transaction rate. + this.coordinator.log("SubscribeUnderLoad.prototype.testActions - impose load"); //Start a background client load. var seeding=this.generateLoad(); @@ -41,6 +42,7 @@ SubscribeUnderLoad.prototype.runTest = function() { */ + this.coordinator.log("SubscribeUnderLoad.prototype.testActions - add tables"); /** * Add some tables to replication. * @@ -49,6 +51,7 @@ SubscribeUnderLoad.prototype.runTest = function() { + this.coordinator.log("SubscribeUnderLoad.prototype.testActions - subscribe nodes"); /** * Subscribe the nodes. */ @@ -63,6 +66,7 @@ SubscribeUnderLoad.prototype.runTest = function() { seeding.stop(); this.coordinator.join(seeding); + this.coordinator.log("SubscribeUnderLoad.prototype.testActions - sync"); this.slonikSync(1,1); for(var idx=1; idx <=this.getNodeCount(); idx++) { @@ -76,6 +80,7 @@ SubscribeUnderLoad.prototype.runTest = function() { * a subscription anyway, but shouldn't not stop. 
*/ + this.coordinator.log("SubscribeUnderLoad.prototype.testActions - compare db1,2,3,4,5"); this.compareDb('db1', 'db2'); this.compareDb('db1', 'db3'); this.compareDb('db1', 'db4'); @@ -86,6 +91,6 @@ SubscribeUnderLoad.prototype.runTest = function() { slonArray[idx-1].stop(); this.coordinator.join(slonArray[idx-1]); } - + this.coordinator.log("SubscribeUnderLoad.prototype.testActions - begin"); } diff --git a/clustertest/disorder/tests/Unsubscribe.js b/clustertest/disorder/tests/Unsubscribe.js index dd94c54e..9448538b 100644 --- a/clustertest/disorder/tests/Unsubscribe.js +++ b/clustertest/disorder/tests/Unsubscribe.js @@ -15,7 +15,7 @@ Unsubscribe.prototype = new BasicTest(); Unsubscribe.prototype.constructor = Unsubscribe; Unsubscribe.prototype.runTest = function() { - + this.coordinator.log("Unsubscribe.prototype.runTest - begin"); this.testResults.newGroup("Unsubscribe"); this.setupReplication(); @@ -99,10 +99,11 @@ Unsubscribe.prototype.runTest = function() { slonArray[idx-1].stop(); this.coordinator.join(slonArray[idx-1]); } - + this.coordinator.log("Unsubscribe.prototype.runTest - complete"); } Unsubscribe.prototype.unsubscribe=function(node_id,set_id,expect_success) { + this.coordinator.log("Unsubscribe.prototype.unsubscribe - begin"); var slonikPreamble = this.getSlonikPreamble(); var slonikScript = 'echo \'Unsubscribe.prototype.unsubscribe\';\n'; slonikScript +='unsubscribe set(id=' + set_id + ',receiver=' + node_id + ');\n' @@ -111,4 +112,5 @@ Unsubscribe.prototype.unsubscribe=function(node_id,set_id,expect_success) { slonik.run(); this.coordinator.join(slonik); this.testResults.assertCheck("unsubscribe node " + node_id,slonik.getReturnCode()==0,expect_success); + this.coordinator.log("Unsubscribe.prototype.unsubscribe - complete"); } diff --git a/clustertest/disorder/tests/UnsubscribeBeforeEnable.js b/clustertest/disorder/tests/UnsubscribeBeforeEnable.js index 5ec48322..662118ab 100644 --- a/clustertest/disorder/tests/UnsubscribeBeforeEnable.js +++ 
b/clustertest/disorder/tests/UnsubscribeBeforeEnable.js @@ -28,9 +28,8 @@ UnsubscribeBeforeEnable.prototype.getNodeCount = function() { UnsubscribeBeforeEnable.prototype.runTest = function() { + this.coordinator.log("UnsubscribeBeforeEnable.prototype.runTest - begin"); this.testResults.newGroup("unsubscribe before enable"); - - //First setup slony @@ -145,4 +144,5 @@ UnsubscribeBeforeEnable.prototype.runTest = function() { slon2.stop(); this.coordinator.join(slon1); this.coordinator.join(slon2); + this.coordinator.log("UnsubscribeBeforeEnable.prototype.runTest - complete"); } From b04d37b67011386698ea6af066cabfad98715fe0 Mon Sep 17 00:00:00 2001 From: Christopher Browne Date: Tue, 1 Feb 2011 17:26:58 -0500 Subject: [PATCH 06/11] Fix misspelled names --- clustertest/regression/testinherit/testinherit.js | 2 +- clustertest/regression/testlargetuples/testlargetuples.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/clustertest/regression/testinherit/testinherit.js b/clustertest/regression/testinherit/testinherit.js index cd3064fb..32a128d1 100644 --- a/clustertest/regression/testinherit/testinherit.js +++ b/clustertest/regression/testinherit/testinherit.js @@ -151,4 +151,4 @@ function get_compare_queries() { return queries; } -run_test(coordinator,'testinerit'); +run_test(coordinator,'testinherit'); diff --git a/clustertest/regression/testlargetuples/testlargetuples.js b/clustertest/regression/testlargetuples/testlargetuples.js index 0f985e83..bfac345b 100644 --- a/clustertest/regression/testlargetuples/testlargetuples.js +++ b/clustertest/regression/testlargetuples/testlargetuples.js @@ -121,4 +121,4 @@ function get_compare_queries() { return queries; } -run_test(coordinator,'testinerit'); +run_test(coordinator,'testinherit'); From ccaa2366d468fccb278e8275ad781da9270ddff3 Mon Sep 17 00:00:00 2001 From: Christopher Browne Date: Thu, 3 Feb 2011 16:07:01 -0500 Subject: [PATCH 07/11] Beginnings of documentation on the new test framework. 
--- doc/adminguide/testbed.sgml | 204 ++++++++++++++++++++++++++++++++++-- 1 file changed, 193 insertions(+), 11 deletions(-) diff --git a/doc/adminguide/testbed.sgml b/doc/adminguide/testbed.sgml index ee1fef80..e7ab4487 100644 --- a/doc/adminguide/testbed.sgml +++ b/doc/adminguide/testbed.sgml @@ -1,22 +1,204 @@ - &slony1; Test Bed Framework + &slony1; Test Suites + + &slony1; has had (thus far) three test suites: + + + Ducttape tests + + These were introduced as part of the original &slony1; +distribution, and induced load via running +pgbench. + + Unfortunately, the tests required human intervention to control +invocation and shutdown of tests, so running them could not be readily +automated. + + Test bed framework + + + &slony1; version 1.1.5 introduced a test framework intended to +better support automation of the tests. It eliminated the use of +xterm, and tests were self-contained and +self-controlled, so that one could run a series of tests. + + Unfortunately, the framework did not include any way of +inducing distributed load, so as to test scenarios involving +sophisticated concurrent activity. + + clustertest framework + + + Introduced during testing of &slony1; version 2.0 during 2010, +and released in early 2011, this framework is intended to be a better +replacement for all of the preceding test +frameworks. + + + + Clustertest Test Framework + +Introduction and Overview + + The clustertest framework is implemented in Java, where tests +are implemented in the interpreted JavaScript language. The use of +Java made it much easier to implement tests involving concurrent +activities, both in terms of inducing test load, and in concurrently +changing configuration of the replication cluster. + + It consists of two physical portions: + + + A framework, implemented in Java + + This software is available at +clustertest-framework @ GitHub . 
+ + This framework makes use of libraries from several other open source projects: + + +js.jar + + This is for org.mozilla.javascript, the Mozilla JavaScript interpreter + +junit-4.8.1.jar + + JUnit, a unit test framework. + + log4j-1.2.15.jar + + Log4J is a popular Java-based framework for generating event +logs. + +postgresql-8.4-701.jdbc3.jar + + This is the &postgres; JDBC driver. + + + To build the framework, it is necessary to have a Java compiler +and the build tool, Ant, installed. To +build all the .jar files used by the framework, +one will run the command, with output similar to the following: + + +% ant jar +Buildfile: /var/lib/postgresql/PostgreSQL/clustertest-framework/build.xml + +compile-common: + [mkdir] Created dir: /var/lib/postgresql/PostgreSQL/clustertest-framework/build/classes + [javac] /var/lib/postgresql/PostgreSQL/clustertest-framework/build.xml:23: warning: 'includeantruntime' was not set, defaulting to build.sysclasspath=last; set to false for repeatable builds + +compile-testcoordinator: + [javac] /var/lib/postgresql/PostgreSQL/clustertest-framework/build.xml:44: warning: 'includeantruntime' was not set, defaulting to build.sysclasspath=last; set to false for repeatable builds + [javac] Compiling 25 source files to /var/lib/postgresql/PostgreSQL/clustertest-framework/build/classes + [javac] Note: /var/lib/postgresql/PostgreSQL/clustertest-framework/src/info/slony/clustertest/testcoordinator/script/ClientScript.java uses or overrides a deprecated API. + [javac] Note: Recompile with -Xlint:deprecation for details. 
+ [copy] Copying 1 file to /var/lib/postgresql/PostgreSQL/clustertest-framework/build/classes/info/slony/clustertest/testcoordinator + +jar-common: + [mkdir] Created dir: /var/lib/postgresql/PostgreSQL/clustertest-framework/build/jar + [jar] Building MANIFEST-only jar: /var/lib/postgresql/PostgreSQL/clustertest-framework/build/jar/clustertest-common.jar + +compile-client: + [javac] /var/lib/postgresql/PostgreSQL/clustertest-framework/build.xml:30: warning: 'includeantruntime' was not set, defaulting to build.sysclasspath=last; set to false for repeatable builds + [javac] Compiling 1 source file to /var/lib/postgresql/PostgreSQL/clustertest-framework/build/classes + [copy] Copying 2 files to /var/lib/postgresql/PostgreSQL/clustertest-framework/build/classes/info/slony/clustertest/client + +jar-client: + [jar] Building jar: /var/lib/postgresql/PostgreSQL/clustertest-framework/build/jar/clustertest-client.jar + +jar-coordinator: + [jar] Building jar: /var/lib/postgresql/PostgreSQL/clustertest-framework/build/jar/clustertest-coordinator.jar + +jar: + +BUILD SUCCESSFUL +Total time: 2 seconds + + + At this time, there is no regression test for +the test framework; to validate that it works requires running tests +that use it. + + + Tests integrated into the &slony1; software +distribution, that consist of a combination of shell scripts, +JavaScript, and SQL scripts. + + See the directory clustertest in the +&slony1; software distribution, which has two sets of tests: + + + + + + + + + DISORDER - DIStributed ORDER test + + The DISORDER or DIStributed +ORDER test is intended to provide concurrency tests +involving a reasonably sophisticated schema to validate various +aspects of &slony1; behavior under concurrent load. + + It consists of: + + A schema for an inventory management application. + Major objects include customers, inventory items, orders, order lines, and shipments. 
+ + There are foreign key relationships between the various items, +as well as triggers that maintain inventory and customer balances. +Some of these relationships involve ON DELETE CASCADE, and +so some actions may induce large numbers of cascaded updates. + + + + Stored procedures to induce creation of the various +sorts of objects, purchases, shipments, and additions and removals of +customers and products. + + Some tests are intended to be run against replicas, +validating that balances add up. We believe that &postgres; applies +changes in a transactional fashion such that they will always +COMMIT leaving the visible state consistent; +certain of the tests look for inconsistencies. + + There are JavaScript test scripts that induce all +sorts of manipulations of replication clusters to validate that +replication configuration changes succeed and fail as expected. + + + + + + + Regression Tests + + These tests represent a re-coding of the tests previously +implemented as shell scripts using the clustertest framework. + + These tests have gradually been enhanced to provide coverage of +scenarios with which &slony1; has had problems; it is to be expected +that new bugs may lead to the addition of further tests. + + + + + &slony1; Test Bed Framework test bed framework - As of version 1.1.5, &slony1; has a common test bed framework -intended to better support running a comprehensive set of tests at -least somewhat automatically. Older tests -used pgbench (not -a bad thing) but were troublesome to automate -because they were set up to spawn each &lslon; in -an xterm for the user -to watch. + Version 1.1.5 of &slony1; introduced a common test bed +framework intended to better support running a comprehensive set of +tests at least somewhat automatically. The new test framework is mostly written in Bourne shell, and is intended to be portable to both Bash (widely used on Linux) and Korn shell (widely found on commercial UNIX systems). 
The code lives -in the source tree under the tests - directory. +in the source tree under the tests +directory. At present, nearly all of the tests make use of only two databases that, by default, are on a single &postgres; postmaster on From 4d178af82b6957997a950da13a0fa5269339a1a7 Mon Sep 17 00:00:00 2001 From: Christopher Browne Date: Thu, 3 Feb 2011 16:37:37 -0500 Subject: [PATCH 08/11] Document configuration for clustertests --- ...ties.sample => disorder.properties.sample} | 0 doc/adminguide/testbed.sgml | 80 ++++++++++++++++++- 2 files changed, 79 insertions(+), 1 deletion(-) rename clustertest/conf/{databases.properties.sample => disorder.properties.sample} (100%) diff --git a/clustertest/conf/databases.properties.sample b/clustertest/conf/disorder.properties.sample similarity index 100% rename from clustertest/conf/databases.properties.sample rename to clustertest/conf/disorder.properties.sample diff --git a/doc/adminguide/testbed.sgml b/doc/adminguide/testbed.sgml index e7ab4487..d11aac63 100644 --- a/doc/adminguide/testbed.sgml +++ b/doc/adminguide/testbed.sgml @@ -172,7 +172,49 @@ replication configuration changes succeed and fail as expected. - + Configuring DISORDER + + DISORDER test configuration may be found in the following +files: + +conf/disorder.properties.sample + + This file contains Java style properties indicating how to +connect to the various databases used by the DISORDER tests, including paths to +tools such as &lslon; and &lslonik; + + The sample file is to be copied to +conf/disorder.properties, and customized to +indicate your local configuration. By using a +.sample file, a developer may run tests within a +Git tree, and not need to worry about their customizations interfering +with the canonical sample configuration +provided. + + + +conf/java.conf.sample + + This is a shell script containing a path indicating where the +clustertest Java code (e.g. - the +clustertest-coordinator.jar file) may be found. 
This is +also used, indirectly to determine where additional Java .jar files +such as the JDBC driver are located. + + As with the disorder properties, above, this needs to be copied +to conf/java.conf, and customized to indicate one's own local +configuration. + + +conf/log4j.properties + + See documentation for Log4J for more +details as to how this is configured; the defaults provided likely do +not need to be altered. + + + Regression Tests @@ -183,6 +225,42 @@ implemented as shell scripts using the clustertest framework. scenarios with which &slony1; has had problems; it is to be expected that new bugs may lead to the addition of further tests. + Configuring Regression Tests + + Similar to the configuration for DISORDER tests, there are three configuration parameters: + +conf/slonyregress.properties.sample + + This file contains Java style properties indicating how to +connect to the various databases used by the regression tests, +including paths to tools such as &lslon; and &lslonik; + + The sample file is to be copied to +conf/slonyregress.properties, and customized to +indicate your local configuration. By using a +.sample file, a developer may run tests within a +Git tree, and not need to worry about their customizations interfering +with the canonical sample configuration +provided. + + + +conf/java.conf.sample + + This is a shell script containing a path indicating where the +clustertest Java code (e.g. - the +clustertest-coordinator.jar file) may be found. +This is also used, indirectly to determine where additional Java .jar +files such as the JDBC driver are located. + + +conf/log4j.properties + + Identical to configuration for DISORDER. 
+ + + From 643e07f335b6d5609199a43723b94e362e8993ff Mon Sep 17 00:00:00 2001 From: Christopher Browne Date: Thu, 3 Feb 2011 16:38:05 -0500 Subject: [PATCH 09/11] Renamed database.properties to disorder.properties --- clustertest/run_all_disorder_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clustertest/run_all_disorder_tests.sh b/clustertest/run_all_disorder_tests.sh index a955eee2..ceadf413 100755 --- a/clustertest/run_all_disorder_tests.sh +++ b/clustertest/run_all_disorder_tests.sh @@ -9,7 +9,7 @@ else exit 1 fi -DBP=conf/databases.properties +DBP=conf/disorder.properties if [ -f ${DBP} ]; then i=1 else From 7a43896ca01666f7414ad89e2b942a7408f6ce7f Mon Sep 17 00:00:00 2001 From: Christopher Browne Date: Thu, 3 Feb 2011 17:52:41 -0500 Subject: [PATCH 10/11] Describe what specific PG/Slony things are in the test framework --- doc/adminguide/testbed.sgml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/doc/adminguide/testbed.sgml b/doc/adminguide/testbed.sgml index d11aac63..bba79a68 100644 --- a/doc/adminguide/testbed.sgml +++ b/doc/adminguide/testbed.sgml @@ -120,6 +120,31 @@ Total time: 2 seconds At this time, there is no regression test for the test framework; to validate that it works requires running tests that use it. 
+ + It includes classes supporting &postgres;- and &slony1;-specific functionality such as: + + CreateDbScript + Creates a database + DropDbScript + Drops a database + LogShippingDaemon + Starts up &slony1; logshipping daemon + LogShippingDumpScript + Dumps and loads logshipping-based schema + PgCommand + Run a &postgres; shell command (such as psql, createdb, and such) + PgDumpCommand + Dump a &postgres; database + PsqlCommandExec + Run SQL + ShellExecScript + Run a shell script/command + SlonLauncher + Start up a &lslon; process + SlonikScript + Run a &lslonik; script + + Tests integrated into the &slony1; software From 927b30b223848c9cf81fcb1766d7aaa443cd51e6 Mon Sep 17 00:00:00 2001 From: Christopher Browne Date: Fri, 4 Feb 2011 12:53:55 -0500 Subject: [PATCH 11/11] Revise index terms for testbed --- doc/adminguide/testbed.sgml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/doc/adminguide/testbed.sgml b/doc/adminguide/testbed.sgml index bba79a68..8b85703d 100644 --- a/doc/adminguide/testbed.sgml +++ b/doc/adminguide/testbed.sgml @@ -1,6 +1,7 @@ &slony1; Test Suites +test suite overview &slony1; has had (thus far) three test suites: @@ -38,6 +39,7 @@ frameworks. Clustertest Test Framework +cluster test suite Introduction and Overview The clustertest framework is implemented in Java, where tests @@ -163,6 +165,8 @@ JavaScript, and SQL scripts. DISORDER - DIStributed ORDER test +DISORDER test suite + The DISORDER or DIStributed ORDER test is intended to provide concurrency tests involving a reasonably sophisticated schema to validate various @@ -243,6 +247,8 @@ not need to be altered. Regression Tests +regression tests, clustertest framework + These tests represent a re-coding of the tests previously implemented as shell scripts using the clustertest framework. @@ -291,7 +297,7 @@ files such as the JDBC driver are located. 
&slony1; Test Bed Framework -test bed framework +regression tests, old test framework Version 1.1.5 of &slony1; introduced a common test bed framework intended to better support running a comprehensive set of