-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathtestrun.sh
More file actions
231 lines (219 loc) · 8.43 KB
/
testrun.sh
File metadata and controls
231 lines (219 loc) · 8.43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
#The hash test requires --dropDB 1 for verification to work, as hashing is database wide
#The hash test requires that a mongoS which can access the md5s is on localhost:27017 on the machine this script is ran on
#Do NOT use connection strings for database connections, use the form preferred by the shell
#It is recommended to use MONGO1 === MONGO2 (this is necessary for the MD5 verification)
#Note that the MD5 tests assume that the non-sharded test cases take place on the first shard, or the test will have false results
#Extra options appended to every mlightning invocation (intentionally expanded unquoted so it word-splits)
OPTIONS=
#Set DRY_RUN=1 to print each test's command line instead of executing it
DRY_RUN=
#Drop the test databases after a successful run
DO_DROP=1
#Remove the on-disk dump directory after the file test
DO_REMOVE_DUMP=1
#A dry run executes nothing, so there is nothing to drop or clean up afterwards
if [ -n "${DRY_RUN}" ] && [ "${DRY_RUN}" -eq 1 ]; then
    DO_DROP=0
    DO_REMOVE_DUMP=0
fi
#Location of the mlightning binary (trailing slash required, it is concatenated directly)
ML_PATH=/home/charlie/git/mlightning/Debug/
#Directory holding the source data set to import
DATA_DIR=/home/charlie/serialshort/
#Scratch directory used by the dump/restore test
DUMP_PATH=/tmp/mlightning_test/
#Whether mlightning talks directly to the shards (1) or through the mongoS (0)
DIRECT_IN=1
DIRECT_OUT=1
DIRECT_FINAL_IN=0
DIRECT_FINAL_OUT=0
#ANSI color escapes used in test output (kept literal; printf interprets them)
SUCCESS='\033[1;34m'
FAILURE='\033[0;31m'
TESTLINE='\033[0;32m'
TESTTYPE='\033[4;33m'
NOCOLOR='\033[0m'
#
#Start functions
#
#Tracks whether runtest has printed its first banner yet
FIRST_RUN=1
#Run a single test step: $1 is a human-readable label, the rest is the command line.
#In dry-run mode the command is echoed instead of executed.
#Exits the whole script on the first failure, deliberately leaving state behind for debugging.
runtest() {
    #Print a blank separator between tests, but not before the very first one
    if [ "${FIRST_RUN}" -eq 1 ]; then
        FIRST_RUN=0
    else
        printf "\n\n"
    fi
    #Pass the label and date through %s so a '%' in the label cannot corrupt the format string
    printf "${TESTLINE}***%s\n%s${NOCOLOR}\n" "${1}" "$(date)"
    shift
    if [ -n "${DRY_RUN}" ] && [ "${DRY_RUN}" -eq 1 ]; then
        echo "$@"
    else
        "$@"
    fi
    local status=$?
    if [ $status -ne 0 ]; then
        printf "${FAILURE}Error with mlightning testing, manual cleanup for the failed test is required (this is intentional, it is kept for debug purposes)${NOCOLOR}\n" >&2
        exit $status
    fi
}
#Drop every database the test suite creates, but only when both endpoints point
#at the same cluster; otherwise we cannot be sure the databases belong to this run.
dropdatabases() {
    if [ "${MONGO1}" = "${MONGO2}" ]; then
        echo "Dropping databases this test created."
        mongo "${MONGO1}" --norc << EOF
var toDrop = ["import", "mirror", "mltnimport", "trans"]
var mongos=new Mongo()
for (i = toDrop.length - 1; i >= 0; --i) {
print("Dropping database " + toDrop[i])
mongos.getDB(toDrop[i]).dropDatabase()
}
EOF
    else
        echo "Different clusters used in testing, not removing databases"
    fi
}
#Remove the dump scratch directory, unless dump removal was disabled (e.g. dry run).
removedumppath() {
    if [ "${DO_REMOVE_DUMP}" -ne 1 ]; then
        return
    fi
    #BUGFIX: was 'rm -rf mkdir ${DUMP_PATH}' -- the stray 'mkdir' word was a typo
    #that also tried to delete a file named 'mkdir' in the current directory
    rm -rf -- "${DUMP_PATH}"
    if [ $? -eq 0 ]; then
        echo "Dump path removed (it did not exist before this test was run)"
    else
        echo "Failure to remove the dump path: ${DUMP_PATH}"
    fi
}
#Import the data set, reshard it twice, then (same-cluster only) verify via dbHash
#that the round trip produced identical data.
runloadingtest() {
    #Import the raw data with a hashed _id shard key
    runtest "Importing data" ${ML_PATH}mlightning ${OPTIONS} --shardKey '{"_id":"hashed"}' --output.uri ${MONGO1} --output.writeConcern 1 --output.direct ${DIRECT_OUT} --output.db import --output.coll original --loadPath ${DATA_DIR} --dropDb 1
    #Reshard the imported data onto a different key
    runtest "Changing shard key" ${ML_PATH}mlightning ${OPTIONS} --shardKey '{"org":"hashed"}' --output.uri ${MONGO2} --output.writeConcern 1 --output.direct ${DIRECT_OUT} --output.db trans --output.coll trans --input.uri ${MONGO1} --input.db import --input.coll original --input.direct ${DIRECT_IN} --dropDb 1
    #Direct isn't used here so routing is verified too
    runtest "Reverting back to original shard key" ${ML_PATH}mlightning ${OPTIONS} --shardKey '{"_id":"hashed"}' --output.uri ${MONGO1} --output.writeConcern 1 --output.direct ${DIRECT_FINAL_OUT} --output.db mirror --output.coll mirror --input.uri ${MONGO2} --input.db trans --input.coll trans --input.direct ${DIRECT_FINAL_IN} --dropDb 1
    #TODO: pump this into /tmp and use sed to set the variables
    if [ "${MONGO1}" = "${MONGO2}" ]; then
        #BUGFIX: was ${MONOG1} (typo) which expanded to nothing, silently connecting to the default host
        runtest "Verifing resharding (The verify is only valid if all operations have taken place on the 127.0.0.1:27017 or the first shard of the cluster)" mongo "${MONGO1}" --norc << EOF
var sourcedb="import"
var sourcecoll="original"
var targetdb="mirror"
var targetcoll="mirror"
var mongos=new Mongo()
var shardHost=mongos.getDB("config").getCollection("shards").findOne().host.toString()
print(shardHost)
var shard=new Mongo(shardHost)
//Ensure that there are documents on the shard being tested
var statsimport=shard.getDB(sourcedb).getCollection(sourcecoll).stats()
var statsmirror=shard.getDB(targetdb).getCollection(targetcoll).stats()
if(statsimport.count < 1) {
print("No documents to test on: " + shard)
quit(1)
}
//In theory this shouldn't be an issue if anything other than w:0 is used for the final write
if (statsimport.count != statsmirror.count) {
print("Waiting for count of records in new collection to stablize (in case w:0 was used)")
//Check to see if the count is increasing over 1 second
do {
var oldcount=statsmirror.count;
sleep(1)
statsmirror=shard.getDB(targetdb).getCollection(targetcoll).stats()
print(".")
} while(oldcount != statsmirror.count)
print(targetdb + "." + targetcoll + " stablized it's own count over a interval 1s, if this test still fails manual verification is suggested")
if (statsimport.count != statsmirror.count) {
print("Counts are not equal")
print(shardHost)
//BUGFIX: the two prints below were missing the '+' before the count (a JS syntax error)
print(sourcedb + "." + sourcecoll + ":" + statsimport.count)
print(targetdb + "." + targetcoll + ":" + statsmirror.count)
quit(2)
}
}
var md5import=shard.getDB(sourcedb).runCommand({dbHash:1})
var md5mirror=shard.getDB(targetdb).runCommand({dbHash:1})
printjson(md5import)
printjson(md5mirror)
if (md5import.md5 != md5mirror.md5) {
print("MD5 check failed")
if (md5import.numCollections != md5mirror.numCollections)
print("Collection size for the databases being hashed aren't the same, is something else running?")
quit(1)
}
print("SUCCESS")
//quit() prevents this from being saved in the history
quit(0)
EOF
    fi
} #runloadingtest
#Dump the mirror collection to disk in mltn format, restore it into a fresh
#database, then (same-cluster only) verify via dbHash that restore == original.
runfiletest() {
    runtest "Dumping the database" ${ML_PATH}mlightning ${OPTIONS} --input.uri ${MONGO1} --input.direct ${DIRECT_IN} --input.db mirror --input.coll mirror --outputType mltn --workPath ${DUMP_PATH}
    runtest "Restoring the database" ${ML_PATH}mlightning ${OPTIONS} --shardKey '{"_id":"hashed"}' --output.uri ${MONGO1} --output.direct ${DIRECT_OUT} --output.db mltnimport --output.coll mirror --inputType mltn --loadPath ${DUMP_PATH}
    removedumppath
    if [ "${MONGO1}" = "${MONGO2}" ]; then
        #BUGFIX: was ${MONOG1} (typo) which expanded to nothing, silently connecting to the default host
        runtest "Verifying dump and restore (The verify is only valid if all operations have taken place on the 127.0.0.1:27017 or the first shard of the cluster)" mongo "${MONGO1}" --norc << EOF
var sourcedb="import"
var sourcecoll="original"
var targetdb="mltnimport"
var targetcoll="mirror"
var mongos=new Mongo()
var shardHost=mongos.getDB("config").getCollection("shards").findOne().host.toString()
print(shardHost)
var shard=new Mongo(shardHost)
//Ensure that there are documents on the shard being tested
var statsimport=shard.getDB(sourcedb).getCollection(sourcecoll).stats()
var statsmirror=shard.getDB(targetdb).getCollection(targetcoll).stats()
if(statsimport.count < 1) {
print("No documents to test on: " + shard)
quit(1)
}
//In theory this shouldn't be an issue if anything other than w:0 is used for the final write
if (statsimport.count != statsmirror.count) {
print("Waiting for count of records in new collection to stablize (in case w:0 was used)")
do {
sleep(1)
var oldstats=statsmirror;
statsmirror=shard.getDB(targetdb).getCollection(targetcoll).stats()
} while(oldstats.count != statsmirror.count)
print(targetdb + "." + targetcoll + " stablized over 1s, if this test still fails manual verification is suggested")
statsmirror=shard.getDB(targetdb).getCollection(targetcoll).stats()
if (statsimport.count != statsmirror.count) {
print("Counts are not equal")
printjson(statsimport.count)
printjson(statsmirror.count)
quit(2)
}
}
var md5import=shard.getDB(sourcedb).runCommand({dbHash:1})
var md5mirror=shard.getDB(targetdb).runCommand({dbHash:1})
printjson(md5import)
printjson(md5mirror)
if (md5import.md5 != md5mirror.md5) {
print("MD5 check failed")
if (md5import.numCollections != md5mirror.numCollections)
print("Collection size for the databases being hashed aren't the same, is something else running?")
quit(1)
}
print("SUCCESS")
//quit(0) prevents this from being saved in the history
quit(0)
EOF
    fi
} #runfiletest
#Run the full suite against the mongoS on the default port (sharded mode).
shardedtests() {
    printf "${TESTTYPE}\nStarting Sharded Tests${NOCOLOR}\n"
    SHARDED=1
    MONGO1=127.0.0.1:27017
    MONGO2="${MONGO1}"
    runloadingtest
    runfiletest
    #Clean up the databases this run created, unless dropping was disabled
    if [ "${DO_DROP}" -eq 1 ]; then
        dropdatabases
    fi
}
#Run the full suite directly against a mongoD/replica member (unsharded mode).
replicatests() {
    printf "${TESTTYPE}\nStarting Replica Tests${NOCOLOR}\n"
    SHARDED=0
    #Assuming we started a sharded cluster to begin with, removing mongoS
    MONGO1=127.0.0.1:27018
    MONGO2="${MONGO1}"
    runloadingtest
    runfiletest
    #Clean up the databases this run created, unless dropping was disabled
    if [ "${DO_DROP}" -eq 1 ]; then
        dropdatabases
    fi
}
#
#Start run
#
#Execute the sharded suite; the replica suite is kept available but disabled
shardedtests
#replicatests
printf "\nThere are no tests for unsharded collections in a sharded cluster\n"
#if [ -z ${DRY_RUN} ] || [ ${DRY_RUN} -eq 0 ]; then
#Reaching this line means every runtest call above succeeded (failures exit early)
printf "${SUCCESS}`date`\n***\n***\n*** All tests have successfully completed!\n***\n***${NOCOLOR}\n"
#fi