diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 00000000..c46504cc
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,386 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=.git,pymdht,libnacl,data
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=pylint_common,pylint.extensions.check_elif,pylint.extensions.check_docs
+
+# To install required plugins:
+# sudo apt install python-pylint-common python-enchant python-pylint-plugin-utils
+
+
+# Use multiple processes to speed up Pylint.
+jobs=8
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=libtorrent
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different than the one from reality.
+optimize-ast=yes
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+#disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating
+disable=C0321,W0142,invalid-name,missing-docstring,missing-type-doc
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb,on_
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis. It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set). This supports can work
+# with qualified names.
+ignored-classes=SQLObject,twisted.internet.reactor,ephem,libtorrent
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it working
+# install python-enchant package.
+# spelling-dict=en # Disable it for now, breaks pylint due to non-ascii chars.
+
+# To install required dictionary:
+# sudo apt install aspell-en
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=Tribler,dispersy,pymdht
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=120
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )??$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=2000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=LF
+
+
+[BASIC]
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,input
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_,d
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=yes
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=5
+
+
+[ELIF]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 00000000..ad9b1c44
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,536 @@
+#!groovy
+// -*- mode: Groovy;-*-
+// Jenkinsfile ---
+//
+// Filename: Jenkinsfile
+// Description:
+// Author: Elric Milon
+// Maintainer:
+// Created: Thu Jun 9 14:11:55 2016 (+0200)
+
+// Commentary:
+//
+//
+//
+//
+
+// Change Log:
+//
+//
+//
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but
+// WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GNU Emacs. If not, see .
+//
+//
+
+// Code:
+
+def power_users = ["whirm", "lfdversluis"]
+def change_author
+
+//////////////////
+def failFast = true
+def skipTests = true
+def skipExperiments = false
+//////////////////
+
+def jobFailed = false
+
+stage "Verify author"
+
+node {
+ change_author = env.CHANGE_AUTHOR
+}
+
+def changeGHStatus(message) {
+ node {
+ // TODO: None of this seem to work at the time of writing.
+ //step([$class: 'GitHubCommitStatusSetter', contextSource: [$class: 'ManuallyEnteredCommitContextSource', context: 'Jenkins'], statusResultSource: [$class: 'ConditionalStatusResultSource', results: [[$class: 'AnyBuildResult', message: message, state: 'PENDING']]]])
+ //step([$class: 'GitHubCommitStatusSetter', statusResultSource: [$class: 'ConditionalStatusResultSource', results: [[$class: 'AnyBuildResult', message: message, state: 'PENDING']]]])
+ //step([$class: 'GitHubCommitNotifier', resultOnFailure: 'FAILURE', statusMessage: [content: message]])
+ //step([$class: 'GitHubSetCommitStatusBuilder', statusMessage: [content: message]])
+ }
+}
+
+echo "Changeset from ${change_author}"
+if (power_users.contains(change_author)) {
+ echo "PR comes from power user. Testing"
+} else {
+ changeGHStatus('Waiting for organization member aproval')
+ input "Do you want to test this change by '${change_author}'?"
+ changeGHStatus('Job execution approved, going forth!')
+}
+
+node {
+ deleteDir()
+ sh "touch allstashes.txt"
+ stash includes: "allstashes.txt", name: "allstashes.txt"
+}
+
+
+def gitCheckout(url, branch, targetDir=''){
+ if (targetDir == '') {
+ targetDir = (url =~ '.*/(.+).git')[0][1]
+ }
+ echo "cloning ${url} to ${targetDir} and checking out branch: ${branch}"
+
+ checkout([$class: 'GitSCM',
+ userRemoteConfigs: [[url: url]],
+ branches: [[name: branch]],
+
+ doGenerateSubmoduleConfigurations: false,
+ extensions: [[$class: 'CloneOption',
+ noTags: false,
+ reference: '',
+ shallow: true],
+
+ [$class: 'SubmoduleOption',
+ disableSubmodules: false,
+ recursiveSubmodules: true,
+ reference: '',
+ trackingSubmodules: false],
+
+ [$class: 'RelativeTargetDirectory',
+ relativeTargetDir: targetDir],
+
+ [$class: 'CleanCheckout'],
+
+ [$class: 'CleanBeforeCheckout']],
+ submoduleCfg: [],
+ ])
+
+}
+
+def checkoutGumby() {
+ gitCheckout('https://github.com/lfdversluis/gumby.git', '*/async-dispersy')
+}
+
+def unstashAll() {
+ unstash 'tribler'
+ unstash 'gumby'
+ dir('tribler/Tribler/'){
+ unstashDispersy()
+ }
+ echo "unstash all succeeded"
+}
+
+def unstashDispersy() {
+ unstash 'dispersy'
+ sh 'tar xpf dispersy.tar ; rm dispersy.tar'
+}
+
+
+def runTestsAndStash(testRunner, stashName) {
+ try {
+ testRunner()
+ } finally {
+ stash includes: 'output/**', name: "${stashName}"
+
+ unstash "allstashes.txt"
+ sh "echo ${stashName} >> allstashes.txt"
+ // def allStashes = readFile("allstashes.txt").split('\n')
+ // println "r1"
+ // println "${allStashes} ${stashName}"
+ // println "r2"
+ // allStashes.add("${stashName}".toString())
+ // println "r3"
+ // println "${allStashes}"
+ // writeFile file: 'allstashes.txt', text: allStashes.join("\n")
+ stash includes: "allstashes.txt", name: "allstashes.txt"
+ }
+}
+
+def unstashAllResults() {
+ unstash "allstashes.txt"
+ def allStashes = readFile("allstashes.txt").split('\n')
+ dir('output'){
+ for (int i = 0; i < allStashes.size(); i++) {
+
+ def stash = allStashes[i]
+ if (stash != "") {
+ echo "Unstashing '${stash}'"
+ unstash stash
+ }
+
+ }
+ }
+}
+
+
+// def fakeTestRun = {
+// sh '''mkdir output
+// cd output
+// echo date > file.txt
+// touch "$(date)"
+// '''
+// }
+
+// node {
+// deleteDir()
+// runTestsAndStash(fakeTestRun, "this_is_a_name")
+// deleteDir()
+// runTestsAndStash(fakeTestRun, "this_is_a_name_too")
+// deleteDir()
+// unstashAllResults()
+// sh "ls -lR"
+// sh "exit 1"
+// }
+
+
+def runDispersyTestsOnOSX = {
+ deleteDir()
+ unstashDispersy()
+ unstash 'gumby'
+ sh '''
+WORKSPACE=$PWD
+OUTPUT=$WORKSPACE/output
+mkdir -p $OUTPUT
+
+export PATH=$PATH:~/Library/Python/2.7/bin
+
+#NOSE_EXTRAS="--cover-html --cover-html-dir=$WORKSPACE/output/coverage/ --cover-branches -d"
+NOSECMD="nosetests -x -v --with-xcoverage --with-xunit --all-modules --traverse-namespace --cover-package=dispersy --cover-inclusive"
+
+$NOSECMD --xcoverage-file=$OUTPUT/coverage.xml --xunit-file=$OUTPUT/nosetests.xml --xunit-testsuite-name=OSX_dispersy dispersy/tests
+
+# TODO: make a gumby job to parallelize the dispersy test execution
+# env
+# export TMPDIR="$PWD/tmp"
+# export NOSE_COVER_TESTS=1
+# export GUMBY_nose_tests_parallelisation=12
+# export NOSE_TESTS_TO_RUN=dispersy/tests
+# export PYTHONPATH=$HOME/.local/lib/python2.7/site-packages:$PYTHONPATH
+# export PYLINTRC=$PWD/tribler/.pylintrc
+# ulimit -c unlimited
+# gumby/run.py gumby/experiments/tribler/run_all_tests_parallel.conf
+'''
+}
+
+def runDispersyTestsOnLinux = {
+ deleteDir()
+ unstashDispersy()
+ sh '''
+#cd tribler/Tribler/
+
+WORKSPACE=$PWD
+OUTPUT=$WORKSPACE/output
+mkdir -p $OUTPUT
+#NOSE_EXTRAS="--cover-html --cover-html-dir=$WORKSPACE/output/coverage/ --cover-branches -d"
+NOSECMD="nosetests -x -v --with-xcoverage --with-xunit --all-modules --traverse-namespace --cover-package=dispersy --cover-inclusive"
+
+$NOSECMD --xcoverage-file=$OUTPUT/coverage.xml --xunit-testsuite-name=Linux_dispersy dispersy/tests/
+
+# TODO: make a gumby job to parallelize the dispersy test execution
+# env
+# export TMPDIR="$PWD/tmp"
+# export NOSE_COVER_TESTS=1
+# export GUMBY_nose_tests_parallelisation=12
+# export NOSE_TESTS_TO_RUN=dispersy/tests
+# export PYTHONPATH=$HOME/.local/lib/python2.7/site-packages:$PYTHONPATH
+# export PYLINTRC=$PWD/tribler/.pylintrc
+# ulimit -c unlimited
+# gumby/run.py gumby/experiments/tribler/run_all_tests_parallel.conf
+'''
+}
+
+def runDispersyTestsOnWindows32 = {
+ deleteDir()
+ unstashDispersy()
+ sh '''
+PATH=/usr/bin/:$PATH
+WORKSPACE=$PWD
+# get the workspace as windows path
+UNIX_WORKSPACE=$(cygpath -u $WORKSPACE)
+export OUTPUT_DIR=$WORKSPACE\\output
+mkdir -p $UNIX_WORKSPACE/output
+
+WORKSPACE=$PWD
+OUTPUT=$WORKSPACE/output
+mkdir -p $OUTPUT
+#NOSE_EXTRAS="--cover-html --cover-html-dir=$WORKSPACE/output/coverage/ --cover-branches -d"
+NOSECMD="nosetests -x -v --with-xcoverage --with-xunit --all-modules --traverse-namespace --cover-package=dispersy --cover-inclusive"
+
+$NOSECMD --xcoverage-file=$OUTPUT/coverage.xml --xunit-testsuite-name=Win32_dispersy dispersy/tests/
+
+'''
+}
+
+def runTriblerTestsOnLinux = {
+ deleteDir()
+ unstashAll()
+ sh '''
+export TMPDIR="$PWD/tmp"
+export NOSE_COVER_TESTS=1
+export GUMBY_nose_tests_parallelisation=12
+export PYTHONPATH=$HOME/.local/lib/python2.7/site-packages:$PYTHONPATH
+export PYLINTRC=$PWD/tribler/.pylintrc
+ulimit -c unlimited
+gumby/run.py gumby/experiments/tribler/run_all_tests_parallel.conf
+'''
+}
+
+def runAllChannelExperiment = {
+ deleteDir()
+ unstashAll()
+ stash includes: '**', name: "experiment_workdir"
+ echo "stashed XXXXXXX"
+ try {
+ runOnFreeCluster('gumby/experiments/dispersy/allchannel.conf')
+ } finally {
+ dir('output'){
+ unstash 'experiment_results'
+ }
+ }
+}
+
+def runOnFreeCluster(experimentConf){
+ //def experimentConf = env.EXPERIMENT_CONF
+ // stage 'Checkout gumby'
+ // checkoutGumby()
+
+ stage 'Find a free cluster'
+
+ sh "ls -l"
+
+ def experimentName
+ def clusterName
+ node('master') {
+ echo "Reading ${experimentConf}"
+
+ def confFile = readFile(experimentConf).replaceAll(/#.*/,"")
+ // This stopped working after some jenkins update, no error, no exception,
+ // the rest of the node {} gets skipped and it goes on as if nothing
+ // happened.
+ // def configObject = new ConfigSlurper().parse(confFile) def
+ // neededNodes = configObject.das4_node_amount
+ // experimentName = configObject.experiment_name configObject = null
+
+ def getNodes = {
+ def matcher = confFile =~ 'das4_node_amount.*= *(.+)'
+ matcher[0][1]
+ }
+
+ def getExperimentName = {
+ def matcher = confFile =~ 'experiment_name.*= *(.+)'
+ matcher[0][1]
+ }
+
+ neededNodes = getNodes()
+ experimentName = getExperimentName()
+
+ try {
+ neededNodes = "${NODES}"
+ } catch (groovy.lang.MissingPropertyException err) {
+ echo "NODES env var not passed, using config file value"
+ }
+
+ sh "gumby/scripts/find_free_cluster.sh ${neededNodes}"
+ clusterName = readFile('cluster.txt')
+ }
+
+ stage "Run ${experimentName}"
+
+ node(clusterName) {
+ try {
+
+ unstash "experiment_workdir"
+
+ // stage 'Check out Gumby'
+ // checkoutGumby()
+
+ // stage 'Check out Tribler'
+ // gitCheckout('https://github.com/Tribler/tribler.git', '*/devel')
+
+ sh """
+gumby/scripts/build_virtualenv.sh
+source ~/venv/bin/activate
+
+./gumby/run.py ${experimentConf}
+"""
+ } finally {
+ stash includes: 'output/**', name: 'experiment_results'
+ }
+ }
+}
+
+stage "Checkout"
+
+parallel "Checkout Tribler without dispersy": {
+ node {
+ deleteDir()
+ gitCheckout('https://github.com/lfdversluis/tribler.git', '*/fix-dispersy-deferreds')
+
+ dir('tribler') {
+ // TODO: this shouldn't be necessary, but the git plugin gets really confused
+ // if a submodule's remote changes.
+ sh 'git submodule update --init --recursive'
+ }
+ stash includes: '**', excludes: 'tribler/Tribler/dispersy', name: 'tribler'
+ }
+},
+"Checkout Gumby": {
+ node {
+ deleteDir()
+ checkoutGumby()
+ stash includes: '**', name: 'gumby'
+ }
+},
+"Checkout dispersy": {
+ node {
+ dir('dispersy'){
+ deleteDir()
+ checkout scm
+ // TODO: this shouldn't be necessary, but the git plugin gets really confused
+ // if a submodule's remote changes.
+ sh 'git submodule update --init --recursive'
+ sh 'sed -i s/asdfasdf// tests/__init__.py' // Unbreak the tests
+ }
+ // TODO: For some reason it's impossible to stash any .git file, so work around it.
+ sh 'tar cpf dispersy.tar dispersy'
+ stash includes: 'dispersy.tar', name: 'dispersy'
+ }
+}, failFast: failFast
+
+stage "Tests"
+try {
+ if (! skipTests) {
+ parallel "Linux dispersy tests": {
+ node {
+ runTestsAndStash(runDispersyTestsOnLinux, 'dispersy_results')
+ }
+ },
+ "Linux Tribler tests": {
+ node {
+ runTestsAndStash(runTriblerTestsOnLinux, 'dispersy_tribler_results')
+ }
+ },
+ "OSX dispersy tests": {
+ node("osx") {
+ runTestsAndStash(runDispersyTestsOnOSX, 'dispersy_osx_results')
+ }
+ },
+ "Windows 32 dispersy tests": {
+ node("win32") {
+ runTestsAndStash(runDispersyTestsOnWindows32, 'dispersy_win32_results')
+ }
+ }, failFast: failFast
+ }
+} catch (all) {
+ jobFailed = true
+ throw all
+} finally {
+ if (! skipTests) {
+ node {
+ deleteDir()
+ unstashAllResults()
+ if (jobFailed) {
+ archive '**'
+ }
+ step([$class: 'JUnitResultArchiver', testResults: '**/*nosetests.xml'])
+ // step([$class: 'JUnitResultArchiver',
+ // testDataPublishers: [[$class: 'TestDataPublisher']],
+ // healthScaleFactor: 1000,
+ // testResults: '**/*nosetests.xml'])
+ }
+ }
+}
+
+stage "Coverage"
+if (! skipTests) {
+ node {
+ unstashDispersy()
+ unstashAllResults()
+ sh '''
+set -x
+echo $PATH
+export PATH=$PATH:$HOME/.local/bin/
+
+OUTPUT=$PWD/output/cover
+mkdir -p $OUTPUT
+
+cd dispersy
+
+diff-cover `find $OUTPUT/.. -iname coverage.xml` --compare-branch origin/$CHANGE_TARGET --fail-under 100 --html-report $OUTPUT/index.html --external-css-file $OUTPUT/style.css
+'''
+ dir('output/cover') {
+ publishHTML(target: [allowMissing: false, alwaysLinkToLastBuild: false, keepAll: true, reportDir: '.', reportFiles: 'index.html', reportName: 'Coverage diff'])
+ }
+ }
+}
+
+// stage "Experiments"
+try {
+ if (! skipExperiments) {
+ node('master') {
+ runTestsAndStash(runAllChannelExperiment, 'allchannel_results')
+ }
+ }
+} finally {
+ node('master'){
+ echo "??????"
+ unstashAllResults()
+ echo "!!!!!!"
+ // TODO: Archival should happen only once after everything runs or when something fails
+ archive '**'
+ echo "000000"
+ }
+}
+
+stage "Style and static analysis"
+parallel "Pylint": {
+ node {
+ deleteDir()
+ unstashDispersy()
+
+ sh '''
+export PATH=$PATH:$HOME/.local/bin/
+
+mkdir -p output
+
+cd dispersy
+
+ls -la
+
+#git branch -r
+#(git diff origin/${CHANGE_TARGET}..HEAD | grep ^diff)||:
+
+PYLINTRC=.pylintrc diff-quality --violations=pylint --options="dispersy" --compare-branch origin/${CHANGE_TARGET} --fail-under 100 --html-report ../output/index.html --external-css-file ../output/style.css
+'''
+ dir('output') {
+ publishHTML(target: [allowMissing: false, alwaysLinkToLastBuild: false, keepAll: true, reportDir: '.', reportFiles: 'index.html', reportName: 'Code quality diff'])
+ }
+ }
+},
+failFast: failFast
+
+stage "Rogue commit checks"
+node {
+ deleteDir()
+ unstashDispersy()
+ sh '''
+cd dispersy
+
+ROGUE_COMMITS=$(git log -E --grep=\'^(DROPME|fixup!|Merge)\' origin/${CHANGE_TARGET}..HEAD)
+
+if [ ! -z "${ROGUE_COMMITS}" ]; then
+echo "Found some bad commits:"
+echo $ROGUE_COMMITS
+exit 1
+fi
+'''
+}
+
+//
+// Jenkinsfile ends here
+
diff --git a/StormDBManager.py b/StormDBManager.py
new file mode 100644
index 00000000..6ca1a676
--- /dev/null
+++ b/StormDBManager.py
@@ -0,0 +1,398 @@
+import logging
+
+from storm.database import create_database
+from storm.exceptions import OperationalError
+from twisted.internet.defer import DeferredLock, inlineCallbacks
+
+class StormDBManager:
+ """
+ The StormDBManager is a manager that runs queries using the Storm Framework.
+ These queries will be run on the Twisted thread-pool to ensure asynchronous, non-blocking behavior.
+ In the future, this database manager will be the basis of an ORM based approach.
+ """
+
+ def __init__(self, db_path):
+ """
+ Sets up the database and all necessary elements for the database manager to function.
+ """
+ self._logger = logging.getLogger(self.__class__.__name__)
+
+ self.db_path = db_path
+ self._database = None
+ self.connection = None
+ self._cursor = None
+ self._version = 0
+ self._pending_commits = 0
+ self._commit_callbacks = []
+
+ # The transactor is required when you have methods decorated with the @transact decorator
+ # This field name must NOT be changed.
+ # self.transactor = Transactor(reactor.getThreadPool())
+
+ # Create a DeferredLock that should be used by callers to schedule their call.
+ self.db_lock = DeferredLock()
+
+ @inlineCallbacks
+ def initialize(self):
+ """
+ Open/create the database and initialize the version.
+ """
+ self._database = create_database(self.db_path)
+ self.connection = self._database.raw_connect()
+ self._cursor = self.connection.cursor()
+
+ self._version = 0
+ yield self._retrieve_version()
+
+ def close(self, commit=True):
+ assert self._cursor is not None, "Database.close() has been called or Database.open() has not been called"
+ assert self.connection is not None, "Database.close() has been called or Database.open() has not been called"
+ if commit:
+ self.commit(exiting=True)
+ self._logger.debug("close database [%s]", self.db_path)
+ self._cursor.close()
+ self._cursor = None
+ self.connection.close()
+ self.connection = None
+ return True
+
+ @property
+ def version(self):
+ return self._version
+
+ @inlineCallbacks
+ def _retrieve_version(self):
+ """
+ Attempts to retrieve the current database version from the MyInfo table.
+ If it fails, the _version field remains at 0 as defined in the init function.
+ """
+ try:
+ version_str, = yield self.fetchone(u"SELECT value FROM MyInfo WHERE entry == 'version'")
+ self._version = int(version_str)
+ self._logger.info(u"Current database version is %s", self._version)
+ except (TypeError, OperationalError):
+ self._logger.warning(u"Failed to load database version, setting the DB version to 0.")
+ self._version = 0
+
+ def schedule_query(self, callable, *args, **kwargs):
+ """
+ Utility function to schedule a query to be executed using the db_lock.
+
+ Args:
+ callable: The database function that is to be executed.
+ *args: Any additional arguments that will be passed as the callable's arguments.
+ **kwargs: Keyword arguments that are passed to the callable function.
+
+ Returns: A deferred that fires with the result of the query.
+
+ """
+ return self.db_lock.run(callable, *args, **kwargs)
+
+ def execute(self, query, arguments=(), get_lastrowid=False):
+ """
+ Executes a query on the twisted thread pool using the Storm framework.
+
+ Args:
+ query: The sql query to be executed.
+ arguments: Optional arguments that go with the sql query.
+ get_lastrowid: If true, this function will return the last inserted row id, otherwise None.
+
+ Returns: A deferred that fires once the execution is done, the result will be None if get_lastrowid is False
+ else it returns with the last inserted row id.
+
+ """
+
+ # @transact
+ def _execute(self, query, arguments=(), get_lastrowid=False):
+ # connection = Connection(self._database)
+ ret = None
+ if get_lastrowid:
+ self._cursor.execute(query, arguments)
+ ret = self._cursor.lastrowid
+ else:
+ self._cursor.execute(query, arguments)
+ # connection.close()
+ return ret
+
+ return self.db_lock.run(_execute, self, query, arguments, get_lastrowid)
+
+ def executemany(self, query, list):
+ """
+ Executes a query on the twisted thread pool using the Storm framework many times using the values provided by
+ a list.
+
+ Args:
+ query: The sql query to be executed.
+ list: The list containing tuples of values to execute the query with.
+
+ Returns: A deferred that fires once the execution is done, the result will be None.
+
+ """
+ # def _execute(connection, query, arguments=()):
+ # connection.execute(query, arguments, noresult=True)
+
+ # @transact
+ def _executemany(self, query, list):
+ # connection = Connection(self._database)
+ self._cursor.executemany(query, list)
+ # for item in list:
+ # self._cursor.executemany(query, list)
+ # _execute(connection, query, item)
+ # connection.close()
+ return self.db_lock.run(_executemany, self, query, list)
+
+ def executescript(self, sql_statements):
+ """
+ Executes a script of several sql queries sequentially.
+ Note that this function does exist in SQLite, but not in the Storm framework:
+ https://www.mail-archive.com/storm@lists.canonical.com/msg00569.html
+
+ Args:
+ sql_statements: A list of sql statements to be executed.
+
+ Returns: A deferred that fires with None once all statements have been executed.
+
+ """
+ # def _execute(connection, query):
+ # connection.execute(query, noresult=True)
+
+ # @transact
+ def _executescript(self, sql_statements):
+ # connection = Connection(self._database)
+ for sql_statement in sql_statements:
+ self.execute(sql_statement)
+ # connection.close()
+ return self.db_lock.run(_executescript, self, sql_statements)
+
+ def fetchone(self, query, arguments=()):
+ """
+ Executes a query on the twisted thread pool using the Storm framework and returns the first result.
+ The optional arguments should be provided when running a parametrized query. It has to be an iterable data
+ structure (tuple, list, etc.).
+
+ Args:
+ query: The sql query to be executed.
+ arguments: Optional arguments that go with the sql query.
+
+ Returns: A deferred that fires with the first tuple that matches the query or None.
+ The result would be the same as using execute and calling the next() function on it, instead now you will get
+ None instead of a StopIterationException.
+
+ """
+ # @transact
+ def _fetchone(self, query, arguments=()):
+ # connection = Connection(self._database)
+ # result = connection.execute(query, arguments).get_one()
+ # connection.close()
+ # return result
+ try:
+ return self._cursor.execute(query, arguments).next()
+ except (StopIteration, OperationalError):
+ return None
+
+ return self.db_lock.run(_fetchone, self, query, arguments)
+
+ def fetchall(self, query, arguments=()):
+ """
+ Executes a query on the twisted thread pool using the Storm framework and returns a list of tuples containing
+ all matches through a deferred.
+
+ Args:
+ query: The sql query to be executed.
+ arguments: Optional arguments that go with the sql query.
+
+ Returns: A deferred that fires with a list of tuple results that matches the query, possibly empty.
+
+ """
+ # @transact
+ def _fetchall(self, query, arguments=()):
+ # connection = Connection(self._database)
+ # res = connection.execute(query, arguments).get_all()
+ # connection.close()
+ # return res
+ return self._cursor.execute(query, arguments).fetchall()
+
+
+ return self.db_lock.run(_fetchall, self, query, arguments)
+
+ def insert(self, table_name, **kwargs):
+ """
+ Inserts data provided as keyword arguments into the table provided as an argument.
+
+ Args:
+ table_name: The name of the table the data has to be inserted into.
+ **kwargs: A dictionary where the key represents the column and the value the value to be inserted.
+
+ Returns: A deferred that fires with None when the data has been inserted.
+
+ """
+ # @transact
+ def _insert(self, table_name, **kwargs):
+ # connection = Connection(self._database)
+ self._insert(table_name, **kwargs)
+ # connection.close()
+
+ return self.db_lock.run(_insert, self, table_name, **kwargs)
+
+ def _insert(self, table_name, **kwargs):
+ """
+ Utility function to insert data which is not decorated by the @transact to prevent a loop calling this function
+ to create many threads.
+ Do NOT call this function on the main thread, it will be blocking on that thread.
+
+ Args:
+ connection: The database connection object.
+ table_name: The name of the table the data has to be inserted into.
+ **kwargs: A dictionary where the key represents the column and the corresponding value the value to be
+ inserted.
+
+ Returns: A deferred that fires with None when the data has been inserted.
+
+ """
+ if len(kwargs) == 0:
+ raise ValueError("No keyword arguments supplied.")
+ if len(kwargs) == 1:
+ sql = u'INSERT INTO %s (%s) VALUES (?);' % (table_name, kwargs.keys()[0])
+ else:
+ questions = ','.join(('?',)*len(kwargs))
+ sql = u'INSERT INTO %s %s VALUES (%s);' % (table_name, tuple(kwargs.keys()), questions)
+
+ self._cursor.execute(sql, kwargs.values())
+
+ def insert_many(self, table_name, arg_list):
+ """
+ Inserts many items into a table.
+
+ Args:
+ table_name: The table name that you want to insert to.
+ arg_list: A list containing dictionaries where the key is the column name and the corresponding value the
+ value to be inserted into this column.
+
+ Returns: A deferred that fires with None once the bulk insertion is done.
+
+ """
+ # @transact
+ def _insert_many(self, table_name, arg_list):
+ if len(arg_list) == 0:
+ return
+ # connection = Connection(self._database)
+ # for args in arg_list:
+ # self._insert(connection, table_name, **args)
+ # connection.close()
+
+ for args in arg_list:
+ self._insert(table_name, **args)
+
+ return self.db_lock.run(_insert_many, self, table_name, arg_list)
+
+ def delete(self, table_name, **kwargs):
+ """
+ Utility function to delete from a table.
+
+ Args:
+ table_name: The table name to delete data from.
+ **kwargs: A dictionary containing key values.
+ The key is the column to target and the value can be a tuple or single element.
+ In case the value is a tuple, it can specify the operator.
+ In case the value is a single element, the equals "=" operator is used.
+
+ Returns: A deferred that fires with None once the deletion has been performed.
+
+ """
+ sql = u'DELETE FROM %s WHERE ' % table_name
+ arg = []
+ for k, v in kwargs.iteritems():
+ if isinstance(v, tuple):
+ sql += u'%s %s ? AND ' % (k, v[0])
+ arg.append(v[1])
+ else:
+ sql += u'%s=? AND ' % k
+ arg.append(v)
+ sql = sql[:-5] # Remove the last AND
+ return self.execute(sql, arg)
+
+ def count(self, table_name):
+ """
+ Utility function to get the number of rows of a table.
+
+ Args:
+ table_name: The table name.
+
+ Returns: A deferred that fires with the number of rows in the table.
+
+ """
+ sql = u"SELECT count(*) FROM %s LIMIT 1" % table_name
+ return self.fetchone(sql)
+
+ def commit(self, exiting=False):
+ assert self._cursor is not None, "Database.close() has been called or Database.open() has not been called"
+
+ if self._pending_commits:
+ self._logger.debug("defer commit [%s]", self.db_path)
+ self._pending_commits += 1
+ return False
+
+ else:
+ self._logger.debug("commit [%s]", self.db_path)
+ for callback in self._commit_callbacks:
+ try:
+ callback(exiting=exiting)
+ except Exception as exception:
+ self._logger.exception("%s [%s]", exception, self.db_path)
+
+ return self.connection.commit()
+
+ def __enter__(self):
+ """
+ Enters a no-commit state. The commit will be performed by __exit__.
+
+ @return: The method self.execute
+ """
+ self._logger.debug("disabling commit [%s]", self.db_path)
+ self._pending_commits = max(1, self._pending_commits)
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """
+ Leaves a no-commit state. A commit will be performed if Database.commit() was called while
+ in the no-commit state.
+ """
+
+ self._pending_commits, pending_commits = 0, self._pending_commits
+
+ if exc_type is None:
+ self._logger.debug("enabling commit [%s]", self.db_path)
+ if pending_commits > 1:
+ self._logger.debug("performing %d pending commits [%s]", pending_commits - 1, self.db_path)
+ self.commit()
+ return True
+ elif isinstance(exc_value, IgnoreCommits):
+ self._logger.debug("enabling commit without committing now [%s]", self.db_path)
+ return True
+ else:
+ # Niels 23-01-2013, an exception happened from within the with database block
+ # returning False to let Python reraise the exception.
+ return False
+
+ def attach_commit_callback(self, func):
+ assert not func in self._commit_callbacks
+ self._commit_callbacks.append(func)
+
+ def detach_commit_callback(self, func):
+ assert func in self._commit_callbacks
+ self._commit_callbacks.remove(func)
+
+class IgnoreCommits(Exception):
+
+ """
+ Ignore all commits made within the body of a 'with database:' clause.
+
+ with database:
+ # all commit statements are delayed until the database.__exit__
+ database.commit()
+ database.commit()
+ # raising IgnoreCommits causes all commits to be ignored
+ raise IgnoreCommits()
+ """
+ def __init__(self):
+ super(IgnoreCommits, self).__init__("Ignore all commits made within __enter__ and __exit__")
diff --git a/community.py b/community.py
index 38e5cc38..a472bc15 100644
--- a/community.py
+++ b/community.py
@@ -16,7 +16,7 @@
from time import time
from twisted.internet import reactor
-from twisted.internet.defer import inlineCallbacks
+from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.task import LoopingCall, deferLater
from twisted.python.threadable import isInIOThread
@@ -95,6 +95,7 @@ def get_classification(cls):
return cls.__name__.decode("UTF-8")
@classmethod
+ @inlineCallbacks
def create_community(cls, dispersy, my_member, *args, **kargs):
"""
Create a new community owned by my_member.
@@ -127,13 +128,13 @@ def create_community(cls, dispersy, my_member, *args, **kargs):
assert my_member.public_key, my_member.database_id
assert my_member.private_key, my_member.database_id
assert isInIOThread()
- master = dispersy.get_new_member(u"high")
+ master = yield dispersy.get_new_member(u"high")
# new community instance
- community = cls.init_community(dispersy, master, my_member, *args, **kargs)
+ community = yield cls.init_community(dispersy, master, my_member, *args, **kargs)
# create the dispersy-identity for the master member
- message = community.create_identity(sign_with_master=True)
+ yield community.create_identity(sign_with_master=True)
# authorize MY_MEMBER
permission_triplets = []
@@ -167,24 +168,32 @@ def create_community(cls, dispersy, my_member, *args, **kargs):
permission_triplets.append((my_member, message, allowed))
if permission_triplets:
- community.create_authorize(permission_triplets, sign_with_master=True, forward=False)
+ yield community.create_authorize(permission_triplets, sign_with_master=True, forward=False)
- return community
+ returnValue(community)
@classmethod
+ @inlineCallbacks
def get_master_members(cls, dispersy):
from .dispersy import Dispersy
assert isinstance(dispersy, Dispersy), type(dispersy)
assert isInIOThread()
logger.debug("retrieving all master members owning %s communities", cls.get_classification())
- execute = dispersy.database.execute
- return [dispersy.get_member(public_key=str(public_key)) if public_key else dispersy.get_member(mid=str(mid))
- for mid, public_key,
- in list(execute(u"SELECT m.mid, m.public_key FROM community AS c JOIN member AS m ON m.id = c.master"
+ communities = yield dispersy.database.stormdb.fetchall(u"SELECT m.mid, m.public_key FROM community AS c JOIN member AS m ON m.id = c.master"
u" WHERE c.classification = ?",
- (cls.get_classification(),)))]
+ (cls.get_classification(),))
+ member_list = []
+ for mid, public_key, in communities:
+ if public_key:
+ member = yield dispersy.get_member(public_key=str(public_key))
+ member_list.append(member)
+ else:
+ member = yield dispersy.get_member(mid=str(mid))
+ member_list.append(member)
+ returnValue(member_list)
@classmethod
+ @inlineCallbacks
def init_community(cls, dispersy, master, my_member, *args, **kargs):
"""
Initializes a new community, using master as the identifier and my_member as the
@@ -223,9 +232,9 @@ def init_community(cls, dispersy, master, my_member, *args, **kargs):
# add to dispersy
dispersy.attach_community(community)
- community.initialize(*args, **kargs)
+ yield community.initialize(*args, **kargs)
- return community
+ returnValue(community)
def __init__(self, dispersy, master, my_member):
"""
@@ -303,6 +312,7 @@ def __init__(self, dispersy, master, my_member):
self._fast_steps_taken = 0
self._sync_cache = None
+ @inlineCallbacks
def initialize(self):
assert isInIOThread()
self._logger.info("initializing: %s", self.get_classification())
@@ -315,23 +325,23 @@ def initialize(self):
self.register_task("periodic cleanup", LoopingCall(self._periodically_clean_delayed)).start(PERIODIC_CLEANUP_INTERVAL, now=False)
try:
- self._database_id, my_member_did, self._database_version = self._dispersy.database.execute(
+ self._database_id, my_member_did, self._database_version = yield self._dispersy.database.stormdb.fetchone(
u"SELECT id, member, database_version FROM community WHERE master = ?",
- (self._master_member.database_id,)).next()
+ (self._master_member.database_id,))
# if we're called with a different my_member, update the table to reflect this
if my_member_did != self._my_member.database_id:
- self._dispersy.database.execute(u"UPDATE community SET member = ? WHERE master = ?",
+ yield self._dispersy.database.stormdb.execute(u"UPDATE community SET member = ? WHERE master = ?",
(self._my_member.database_id, self._master_member.database_id))
- except StopIteration:
- self._dispersy.database.execute(
+ except TypeError:
+ yield self._dispersy.database.stormdb.execute(
u"INSERT INTO community(master, member, classification) VALUES(?, ?, ?)",
(self._master_member.database_id, self._my_member.database_id, self.get_classification()))
- self._database_id, self._database_version = self._dispersy.database.execute(
+ self._database_id, self._database_version = yield self._dispersy.database.stormdb.fetchone(
u"SELECT id, database_version FROM community WHERE master = ?",
- (self._master_member.database_id,)).next()
+ (self._master_member.database_id,))
self._logger.debug("database id: %d", self._database_id)
@@ -353,7 +363,8 @@ def initialize(self):
# batched insert
update_list = []
- for database_id, name, priority, direction in self._dispersy.database.execute(u"SELECT id, name, priority, direction FROM meta_message WHERE community = ?", (self._database_id,)):
+ meta_messages = yield self._dispersy.database.stormdb.fetchall(u"SELECT id, name, priority, direction FROM meta_message WHERE community = ?", (self._database_id,))
+ for database_id, name, priority, direction in meta_messages:
meta_message_info = self.meta_message_cache.get(name)
if meta_message_info:
if priority != meta_message_info["priority"] or direction != meta_message_info["direction"]:
@@ -363,17 +374,18 @@ def initialize(self):
del self.meta_message_cache[name]
if update_list:
- self._dispersy.database.executemany(u"UPDATE meta_message SET priority = ?, direction = ? WHERE id = ?",
+ yield self._dispersy.database.stormdb.executemany(u"UPDATE meta_message SET priority = ?, direction = ? WHERE id = ?",
update_list)
if self.meta_message_cache:
insert_list = []
for name, data in self.meta_message_cache.iteritems():
insert_list.append((self.database_id, name, data["priority"], data["direction"]))
- self._dispersy.database.executemany(u"INSERT INTO meta_message (community, name, priority, direction) VALUES (?, ?, ?, ?)",
+ yield self._dispersy.database.stormdb.executemany(u"INSERT INTO meta_message (community, name, priority, direction) VALUES (?, ?, ?, ?)",
insert_list)
- for database_id, name in self._dispersy.database.execute(u"SELECT id, name FROM meta_message WHERE community = ?", (self._database_id,)):
+ meta_messages = yield self._dispersy.database.stormdb.fetchall(u"SELECT id, name FROM meta_message WHERE community = ?", (self._database_id,))
+ for database_id, name in meta_messages:
self._meta_messages[name]._database_id = database_id # cleanup pre-fetched values
self.meta_message_cache = None
@@ -385,7 +397,8 @@ def initialize(self):
# the global time. zero indicates no messages are available, messages must have global
# times that are higher than zero.
- self._global_time, = self._dispersy.database.execute(u"SELECT MAX(global_time) FROM sync WHERE community = ?", (self._database_id,)).next()
+ self._global_time, = yield self._dispersy.database.stormdb.fetchone(u"SELECT MAX(global_time) FROM sync WHERE community = ?", (self._database_id,))
+ # TODO(Laurens): Probably a redundant statement.
if self._global_time is None:
self._global_time = 0
assert isinstance(self._global_time, (int, long))
@@ -393,7 +406,8 @@ def initialize(self):
self._logger.debug("global time: %d", self._global_time)
# the sequence numbers
- for current_sequence_number, name in self._dispersy.database.execute(u"SELECT MAX(sync.sequence), meta_message.name FROM sync, meta_message WHERE sync.meta_message = meta_message.id AND sync.member = ? AND meta_message.community = ? GROUP BY meta_message.name", (self._my_member.database_id, self.database_id)):
+ sequence_messages = yield self._dispersy.database.stormdb.fetchall(u"SELECT MAX(sync.sequence), meta_message.name FROM sync, meta_message WHERE sync.meta_message = meta_message.id AND sync.member = ? AND meta_message.community = ? GROUP BY meta_message.name", (self._my_member.database_id, self.database_id))
+ for current_sequence_number, name in sequence_messages:
if current_sequence_number:
self._meta_messages[name].distribution._current_sequence_number = current_sequence_number
@@ -412,7 +426,7 @@ def initialize(self):
# initial timeline. the timeline will keep track of member permissions
self._timeline = Timeline(self)
- self._initialize_timeline()
+ yield self._initialize_timeline()
# random seed, used for sync range
self._random = Random()
@@ -424,29 +438,29 @@ def initialize(self):
self._walk_candidates = self._iter_categories([u'walk', u'stumble', u'intro'])
# statistics...
- self._statistics.update()
+ yield self._statistics.update()
# turn on/off pruning
self._do_pruning = any(isinstance(meta.distribution, SyncDistribution) and
isinstance(meta.distribution.pruning, GlobalTimePruning)
for meta in self._meta_messages.itervalues())
- try:
- # check if we have already created the identity message
- self.dispersy._database.execute(u"SELECT 1 FROM sync WHERE member = ? AND meta_message = ? LIMIT 1",
- (self._my_member.database_id, self.get_meta_message
- (u"dispersy-identity").database_id)).next()
+ # check if we have already created the identity message
+ member = yield self.dispersy.database.stormdb.fetchone(u"SELECT 1 FROM sync WHERE member = ? AND meta_message = ? LIMIT 1",
+ (self._my_member.database_id, self.get_meta_message
+ (u"dispersy-identity").database_id))
+ if member:
self._my_member.add_identity(self)
- except StopIteration:
+ else:
# we haven't do it now
- self.create_identity()
+ yield self.create_identity()
# check/sanity check the database
- self.dispersy_check_database()
+ yield self.dispersy_check_database()
from sys import argv
if "--sanity-check" in argv:
try:
- self.dispersy.sanity_check(self)
+ yield self.dispersy.sanity_check(self)
except ValueError:
self._logger.exception("sanity check fail for %s", self)
@@ -477,25 +491,26 @@ def statistics(self):
"""
return self._statistics
+ @inlineCallbacks
def _download_master_member_identity(self):
assert not self._master_member.public_key
self._logger.debug("using dummy master member")
try:
- public_key, = self._dispersy.database.execute(u"SELECT public_key FROM member WHERE id = ?", (self._master_member.database_id,)).next()
- except StopIteration:
+ public_key, = yield self._dispersy.database.stormdb.fetchone(u"SELECT public_key FROM member WHERE id = ?", (self._master_member.database_id,))
+ except TypeError:
pass
else:
if public_key:
self._logger.debug("%s found master member", self._cid.encode("HEX"))
- self._master_member = self._dispersy.get_member(public_key=str(public_key))
+ self._master_member = yield self._dispersy.get_member(public_key=str(public_key))
assert self._master_member.public_key
self.cancel_pending_task("download master member identity")
else:
for candidate in islice(self.dispersy_yield_verified_candidates(), 1):
if candidate:
self._logger.debug("%s asking for master member from %s", self._cid.encode("HEX"), candidate)
- self.create_missing_identity(candidate, self._master_member)
+ yield self.create_missing_identity(candidate, self._master_member)
def _initialize_meta_messages(self):
assert isinstance(self._meta_messages, dict)
@@ -514,6 +529,7 @@ def _initialize_meta_messages(self):
"when sync is enabled the interval should be greater than the walking frequency. "
" otherwise you are likely to receive duplicate packets [%s]", meta_message.name)
+ @inlineCallbacks
def _initialize_timeline(self):
mapping = {}
for name in [u"dispersy-authorize", u"dispersy-revoke", u"dispersy-dynamic-settings"]:
@@ -524,12 +540,13 @@ def _initialize_timeline(self):
self._logger.warning("unable to load permissions from database [could not obtain %s]", name)
if mapping:
- for packet, in list(self._dispersy.database.execute(u"SELECT packet FROM sync WHERE meta_message IN (" + ", ".join("?" for _ in mapping) + ") ORDER BY global_time, packet",
- mapping.keys())):
- message = self._dispersy.convert_packet_to_message(str(packet), self, verify=False)
+ sync_packets = yield self._dispersy.database.stormdb.fetchall(u"SELECT packet FROM sync WHERE meta_message IN (" + ", ".join("?" for _ in mapping) + ") ORDER BY global_time, packet",
+ mapping.keys())
+ for packet, in sync_packets:
+ message = yield self._dispersy.convert_packet_to_message(str(packet), self, verify=False)
if message:
self._logger.debug("processing %s", message.name)
- mapping[message.database_id]([message], initializing=True)
+ yield mapping[message.database_id]([message], initializing=True)
else:
# TODO: when a packet conversion fails we must drop something, and preferably check
# all messages in the database again...
@@ -537,21 +554,23 @@ def _initialize_timeline(self):
self.get_classification(), self.cid.encode("HEX"), str(packet).encode("HEX"))
@property
+ @inlineCallbacks
def dispersy_auto_load(self):
"""
When True, this community will automatically be loaded when a packet is received.
"""
# currently we grab it directly from the database, should become a property for efficiency
- return bool(self._dispersy.database.execute(u"SELECT auto_load FROM community WHERE master = ?",
- (self._master_member.database_id,)).next()[0])
+ auto_load = yield self._dispersy.database.stormdb.fetchone(u"SELECT auto_load FROM community WHERE master = ?",
+ (self._master_member.database_id,))
+ returnValue(bool(auto_load[0]))
- @dispersy_auto_load.setter
- def dispersy_auto_load(self, auto_load):
+ @inlineCallbacks
+ def set_dispersy_auto_load(self, auto_load):
"""
Sets the auto_load flag for this community.
"""
assert isinstance(auto_load, bool)
- self._dispersy.database.execute(u"UPDATE community SET auto_load = ? WHERE master = ?",
+ yield self._dispersy.database.stormdb.execute(u"UPDATE community SET auto_load = ? WHERE master = ?",
(1 if auto_load else 0, self._master_member.database_id))
@property
@@ -706,6 +725,7 @@ def dispersy_store(self, messages):
self._logger.debug("%s] %d out of %d were part of the cached bloomfilter",
self._cid.encode("HEX"), cached, len(messages))
+ @inlineCallbacks
def dispersy_claim_sync_bloom_filter(self, request_cache):
"""
Returns a (time_low, time_high, modulo, offset, bloom_filter) or None.
@@ -727,7 +747,7 @@ def dispersy_claim_sync_bloom_filter(self, request_cache):
self._logger.debug("%s reuse #%d (packets received: %d; %s)",
self._cid.encode("HEX"), cache.times_used, cache.responses_received,
hex(cache.bloom_filter._filter))
- return cache.time_low, cache.time_high, cache.modulo, cache.offset, cache.bloom_filter
+ returnValue((cache.time_low, cache.time_high, cache.modulo, cache.offset, cache.bloom_filter))
elif self._sync_cache.times_used == 0:
# Still no updates, gradually increment the skipping probability one notch
@@ -743,9 +763,9 @@ def dispersy_claim_sync_bloom_filter(self, request_cache):
self._logger.debug("skip: random() was <%f", self._SKIP_CURVE_STEPS[self._sync_cache_skip_count - 1])
self._statistics.sync_bloom_skip += 1
self._sync_cache = None
- return None
+ returnValue(None)
- sync = self.dispersy_sync_bloom_filter_strategy(request_cache)
+ sync = yield self.dispersy_sync_bloom_filter_strategy(request_cache)
if sync:
self._sync_cache = SyncCache(*sync)
self._sync_cache.candidate = request_cache.helper_candidate
@@ -755,11 +775,12 @@ def dispersy_claim_sync_bloom_filter(self, request_cache):
self._statistics.sync_bloom_reuse, self._statistics.sync_bloom_new,
round(1.0 * self._statistics.sync_bloom_reuse / self._statistics.sync_bloom_new, 2))
- return sync
+ returnValue(sync)
# instead of pivot + capacity, compare pivot - capacity and pivot + capacity to see which globaltime range is largest
- @runtime_duration_warning(0.5)
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@runtime_duration_warning(0.5)
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ @inlineCallbacks
def _dispersy_claim_sync_bloom_filter_largest(self, request_cache):
if __debug__:
t1 = time()
@@ -781,12 +802,12 @@ def _dispersy_claim_sync_bloom_filter_largest(self, request_cache):
if from_gbtime > 1 and self._nrsyncpackets >= capacity:
# use from_gbtime -1/+1 to include from_gbtime
- right, rightdata = self._select_bloomfilter_range(request_cache, syncable_messages, from_gbtime - 1, capacity, True)
+ right, rightdata = yield self._select_bloomfilter_range(request_cache, syncable_messages, from_gbtime - 1, capacity, True)
# if right did not get to capacity, then we have less than capacity items in the database
# skip left
if right[2] == capacity:
- left, leftdata = self._select_bloomfilter_range(request_cache, syncable_messages, from_gbtime + 1, capacity, False)
+ left, leftdata = yield self._select_bloomfilter_range(request_cache, syncable_messages, from_gbtime + 1, capacity, False)
left_range = (left[1] or self.global_time) - left[0]
right_range = (right[1] or self.global_time) - right[0]
@@ -809,7 +830,7 @@ def _dispersy_claim_sync_bloom_filter_largest(self, request_cache):
bloomfilter_range = [1, acceptable_global_time]
- data, fixed = self._select_and_fix(request_cache, syncable_messages, 0, capacity, True)
+ data, fixed = yield self._select_and_fix(request_cache, syncable_messages, 0, capacity, True)
if len(data) > 0 and fixed:
bloomfilter_range[1] = data[-1][0]
self._nrsyncpackets = capacity + 1
@@ -827,17 +848,18 @@ def _dispersy_claim_sync_bloom_filter_largest(self, request_cache):
self._logger.debug("%s took %f (fakejoin %f, rangeselect %f, dataselect %f, bloomfill, %f",
self.cid.encode("HEX"), time() - t1, t2 - t1, t3 - t2, t4 - t3, time() - t4)
- return (min(bloomfilter_range[0], acceptable_global_time), min(bloomfilter_range[1], acceptable_global_time), 1, 0, bloom)
+ returnValue((min(bloomfilter_range[0], acceptable_global_time), min(bloomfilter_range[1], acceptable_global_time), 1, 0, bloom))
if __debug__:
self._logger.debug("%s no messages to sync", self.cid.encode("HEX"))
elif __debug__:
self._logger.debug("%s NOT syncing no syncable messages", self.cid.encode("HEX"))
- return (1, acceptable_global_time, 1, 0, BloomFilter(8, 0.1, prefix='\x00'))
+ returnValue((1, acceptable_global_time, 1, 0, BloomFilter(8, 0.1, prefix='\x00')))
+ @inlineCallbacks
def _select_bloomfilter_range(self, request_cache, syncable_messages, global_time, to_select, higher=True):
- data, fixed = self._select_and_fix(request_cache, syncable_messages, global_time, to_select, higher)
+ data, fixed = yield self._select_and_fix(request_cache, syncable_messages, global_time, to_select, higher)
lowerfixed = True
higherfixed = True
@@ -848,10 +870,10 @@ def _select_bloomfilter_range(self, request_cache, syncable_messages, global_tim
to_select = to_select - len(data)
if to_select > 25:
if higher:
- lowerdata, lowerfixed = self._select_and_fix(request_cache, syncable_messages, global_time + 1, to_select, False)
+ lowerdata, lowerfixed = yield self._select_and_fix(request_cache, syncable_messages, global_time + 1, to_select, False)
data = lowerdata + data
else:
- higherdata, higherfixed = self._select_and_fix(request_cache, syncable_messages, global_time - 1, to_select, True)
+ higherdata, higherfixed = yield self._select_and_fix(request_cache, syncable_messages, global_time - 1, to_select, True)
data = data + higherdata
bloomfilter_range = [data[0][0], data[-1][0], len(data)]
@@ -876,16 +898,18 @@ def _select_bloomfilter_range(self, request_cache, syncable_messages, global_tim
if not higherfixed:
bloomfilter_range[1] = self.acceptable_global_time
- return bloomfilter_range, data
+ returnValue((bloomfilter_range, data))
+ @inlineCallbacks
+ # TODO(Laurens): request_cache is not used.
def _select_and_fix(self, request_cache, syncable_messages, global_time, to_select, higher=True):
assert isinstance(syncable_messages, unicode)
if higher:
- data = list(self._dispersy.database.execute(u"SELECT global_time, packet FROM sync WHERE meta_message IN (%s) AND undone = 0 AND global_time > ? ORDER BY global_time ASC LIMIT ?" % (syncable_messages),
- (global_time, to_select + 1)))
+ data = yield self._dispersy.database.stormdb.fetchall(u"SELECT global_time, packet FROM sync WHERE meta_message IN (%s) AND undone = 0 AND global_time > ? ORDER BY global_time ASC LIMIT ?" % (syncable_messages),
+ (global_time, to_select + 1))
else:
- data = list(self._dispersy.database.execute(u"SELECT global_time, packet FROM sync WHERE meta_message IN (%s) AND undone = 0 AND global_time < ? ORDER BY global_time DESC LIMIT ?" % (syncable_messages),
- (global_time, to_select + 1)))
+ data = yield self._dispersy.database.stormdb.fetchall(u"SELECT global_time, packet FROM sync WHERE meta_message IN (%s) AND undone = 0 AND global_time < ? ORDER BY global_time DESC LIMIT ?" % (syncable_messages),
+ (global_time, to_select + 1))
fixed = False
if len(data) > to_select:
@@ -900,37 +924,42 @@ def _select_and_fix(self, request_cache, syncable_messages, global_time, to_sele
if not higher:
data.reverse()
- return data, fixed
+ returnValue((data, fixed))
# instead of pivot + capacity, compare pivot - capacity and pivot + capacity to see which globaltime range is largest
- @runtime_duration_warning(0.5)
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@runtime_duration_warning(0.5)
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ @inlineCallbacks
+ # TODO(Laurens): This method is never used
def _dispersy_claim_sync_bloom_filter_modulo(self, request_cache):
syncable_messages = u", ".join(unicode(meta.database_id) for meta in self._meta_messages.itervalues() if isinstance(meta.distribution, SyncDistribution) and meta.distribution.priority > 32)
if syncable_messages:
bloom = BloomFilter(self.dispersy_sync_bloom_filter_bits, self.dispersy_sync_bloom_filter_error_rate, prefix=chr(int(random() * 256)))
capacity = bloom.get_capacity(self.dispersy_sync_bloom_filter_error_rate)
- self._nrsyncpackets = list(self._dispersy.database.execute(u"SELECT count(*) FROM sync WHERE meta_message IN (%s) AND undone = 0 LIMIT 1" % (syncable_messages)))[0][0]
+ db_sync_packets = yield self._dispersy.database.stormdb.fetchone(u"SELECT count(*) FROM sync WHERE meta_message IN (%s) AND undone = 0 LIMIT 1" % (syncable_messages))
+ self._nrsyncpackets = db_sync_packets[0]
modulo = int(ceil(self._nrsyncpackets / float(capacity)))
if modulo > 1:
offset = randint(0, modulo - 1)
- packets = list(str(packet) for packet, in self._dispersy.database.execute(u"SELECT sync.packet FROM sync WHERE meta_message IN (%s) AND sync.undone = 0 AND (sync.global_time + ?) %% ? = 0" % syncable_messages, (offset, modulo)))
+ sync_packets = yield self._dispersy.database.stormdb.fetchall(u"SELECT sync.packet FROM sync WHERE meta_message IN (%s) AND sync.undone = 0 AND (sync.global_time + ?) %% ? = 0" % syncable_messages, (offset, modulo))
+ packets = list(str(packet) for packet, in sync_packets)
else:
offset = 0
modulo = 1
- packets = list(str(packet) for packet, in self._dispersy.database.execute(u"SELECT sync.packet FROM sync WHERE meta_message IN (%s) AND sync.undone = 0" % syncable_messages))
+ sync_packets = yield self._dispersy.database.stormdb.fetchall(u"SELECT sync.packet FROM sync WHERE meta_message IN (%s) AND sync.undone = 0" % syncable_messages)
+ packets = list(str(packet) for packet, in sync_packets)
bloom.add_keys(packets)
self._logger.debug("%s syncing %d-%d, nr_packets = %d, capacity = %d, totalnr = %d",
self.cid.encode("HEX"), modulo, offset, self._nrsyncpackets, capacity, self._nrsyncpackets)
- return (1, self.acceptable_global_time, modulo, offset, bloom)
+ returnValue((1, self.acceptable_global_time, modulo, offset, bloom))
else:
self._logger.debug("%s NOT syncing no syncable messages", self.cid.encode("HEX"))
- return (1, self.acceptable_global_time, 1, 0, BloomFilter(8, 0.1, prefix='\x00'))
+ returnValue((1, self.acceptable_global_time, 1, 0, BloomFilter(8, 0.1, prefix='\x00')))
@property
def dispersy_sync_response_limit(self):
@@ -1070,15 +1099,17 @@ def unload_community(self):
self.dispersy.detach_community(self)
+ @inlineCallbacks
def claim_global_time(self):
"""
Increments the current global time by one and returns this value.
@rtype: int or long
"""
- self.update_global_time(self._global_time + 1)
+ yield self.update_global_time(self._global_time + 1)
self._logger.debug("claiming a new global time value @%d", self._global_time)
- return self._global_time
+ returnValue(self._global_time)
+ @inlineCallbacks
def update_global_time(self, global_time):
"""
Increase the local global time if the given GLOBAL_TIME is larger.
@@ -1091,15 +1122,16 @@ def update_global_time(self, global_time):
# Check for messages that need to be pruned because the global time changed.
for meta in self._meta_messages.itervalues():
if isinstance(meta.distribution, SyncDistribution) and isinstance(meta.distribution.pruning, GlobalTimePruning):
- self._dispersy.database.execute(
+ yield self._dispersy.database.stormdb.execute(
u"DELETE FROM sync WHERE meta_message = ? AND global_time <= ?",
(meta.database_id, self._global_time - meta.distribution.pruning.prune_threshold))
+ @inlineCallbacks
def dispersy_check_database(self):
"""
Called each time after the community is loaded and attached to Dispersy.
"""
- self._database_version = self._dispersy.database.check_community_database(self, self._database_version)
+ self._database_version = yield self._dispersy.database.check_community_database(self, self._database_version)
def get_conversion_for_packet(self, packet):
"""
@@ -1169,6 +1201,7 @@ def switch_to_normal_walking():
self.cancel_pending_task("take fast steps")
self.register_task("take step", LoopingCall(self.take_step)).start(TAKE_STEP_INTERVAL, now=True)
+ @inlineCallbacks
def take_fast_steps():
"""
Walk to all the initial and new eligible candidates.
@@ -1192,7 +1225,7 @@ def take_fast_steps():
for count, candidate in enumerate(eligible_candidates, 1):
self._logger.debug("%d of %d extra walk to %s", count, len(eligible_candidates), candidate)
- self.create_introduction_request(candidate, allow_sync=False, is_fast_walker=True)
+ yield self.create_introduction_request(candidate, allow_sync=False, is_fast_walker=True)
self._fast_steps_taken += 1
if self._fast_steps_taken >= FAST_WALKER_STEPS:
@@ -1206,6 +1239,7 @@ def take_fast_steps():
else:
switch_to_normal_walking()
+ @inlineCallbacks
def take_step(self):
now = time()
self._logger.debug("previous sync was %.1f seconds ago",
@@ -1215,7 +1249,7 @@ def take_step(self):
if candidate:
self._logger.debug("%s %s taking step towards %s",
self.cid.encode("HEX"), self.get_classification(), candidate)
- self.create_introduction_request(candidate, self.dispersy_enable_bloom_filter_sync)
+ yield self.create_introduction_request(candidate, self.dispersy_enable_bloom_filter_sync)
else:
self._logger.debug("%s %s no candidate to take step", self.cid.encode("HEX"), self.get_classification())
self._last_sync_time = time()
@@ -1518,12 +1552,13 @@ def add_discovered_candidate(self, d_candidate):
candidate = self.create_candidate(d_candidate.sock_addr, d_candidate.tunnel, d_candidate.sock_addr, d_candidate.sock_addr, u"unknown")
candidate.discovered(time())
+ @inlineCallbacks
def get_candidate_mid(self, mid):
- member = self._dispersy.get_member(mid=mid)
+ member = yield self._dispersy.get_member(mid=mid)
if member:
for candidate in self._candidates.itervalues():
if candidate.is_associated(member):
- return candidate
+ returnValue(candidate)
def filter_duplicate_candidate(self, candidate):
"""
@@ -1831,6 +1866,7 @@ def initiate_conversions(self):
"""
pass
+ @inlineCallbacks
def get_member(self, *argv, **kwargs):
assert not argv, "Only named arguments are allowed"
mid = kwargs.pop("mid", "")
@@ -1846,45 +1882,45 @@ def get_member(self, *argv, **kwargs):
assert not public_key or self._dispersy.crypto.is_valid_public_bin(public_key)
assert not private_key or self._dispersy.crypto.is_valid_private_bin(private_key)
- member = self._dispersy.get_member(mid=mid, public_key=public_key, private_key=private_key)
+ member = yield self._dispersy.get_member(mid=mid, public_key=public_key, private_key=private_key)
# We only need to check if this member has an identity message in this community if we still don't have the full
# public key
if not mid:
- return member
+ returnValue(member)
if isinstance(member, Member):
has_identity = member.has_identity(self)
if not has_identity:
# check database and update identity set if found
- try:
- self._dispersy.database.execute(u"SELECT 1 FROM sync WHERE member = ? AND meta_message = ? LIMIT 1",
- (member.database_id, self.get_meta_message(u"dispersy-identity").database_id)).next()
- except StopIteration:
- pass
- else:
+ sync_packet = yield self._dispersy.database.stormdb.fetchone(u"SELECT 1 FROM sync WHERE member = ? AND meta_message = ? LIMIT 1",
+ (member.database_id, self.get_meta_message(u"dispersy-identity").database_id))
+
+ if sync_packet is not None:
member.add_identity(self)
has_identity = True
if has_identity:
- return member
+ returnValue(member)
+ @inlineCallbacks
def _generic_timeline_check(self, messages):
meta = messages[0].meta
+ return_list = []
if isinstance(meta.authentication, NoAuthentication):
# we can not timeline.check this message because it uses the NoAuthentication policy
- for message in messages:
- yield message
+ return_list = messages
else:
for message in messages:
allowed, proofs = self.timeline.check(message)
if allowed:
- yield message
+ return_list.append(message)
else:
# reply with all proofs when message is rejected and has dynamicresolution
# in order to "fix" differences in dynamic resolution policy between us and the candidate
if isinstance(meta.resolution, DynamicResolution):
- self._dispersy._send_packets([message.candidate], [proof.packet for proof in proofs], self, "-caused by dynamic resolution-")
+ yield self._dispersy._send_packets([message.candidate], [proof.packet for proof in proofs], self, "-caused by dynamic resolution-")
- yield DelayMessageByProof(message)
+ return_list.append(DelayMessageByProof(message))
+ returnValue(iter(return_list))
def _drop(self, drop, packet, candidate):
self._logger.warning("drop a %d byte packet %s from %s", len(packet), drop, candidate)
@@ -1894,6 +1930,7 @@ def _drop(self, drop, packet, candidate):
elif isinstance(drop, DropMessage):
self._statistics.increase_msg_count(u"drop", u"drop_message:%s" % drop)
+ @inlineCallbacks
def _delay(self, match_info, delay, packet, candidate):
assert len(match_info) == 4, match_info
assert not match_info[0] or isinstance(match_info[0], unicode), type(match_info[0])
@@ -1918,7 +1955,7 @@ def _delay(self, match_info, delay, packet, candidate):
self._delayed_value[delay].append(unwrapped_key)
if send_request:
- delay.send_request(self, candidate)
+ yield delay.send_request(self, candidate)
self._statistics.increase_delay_msg_count(u"send")
self._logger.debug("delay a %d byte packet/message (%s) from %s", len(packet), delay, candidate)
@@ -1932,6 +1969,7 @@ def _delay(self, match_info, delay, packet, candidate):
delay.delayed = packet
delay.candidate = candidate
+ @inlineCallbacks
def _resume_delayed(self, meta, messages):
has_mid = isinstance(meta.authentication, (MemberAuthentication, DoubleMemberAuthentication))
has_seq = isinstance(meta.distribution, FullSyncDistribution) and meta.distribution.enable_sequence_number
@@ -1962,11 +2000,11 @@ def _resume_delayed(self, meta, messages):
if new_messages:
for new_messages_meta in new_messages.itervalues():
self._logger.debug("resuming %d messages", len(new_messages_meta))
- self.on_messages(list(new_messages_meta))
+ yield self.on_messages(list(new_messages_meta))
if new_packets:
self._logger.debug("resuming %d packets", len(new_packets))
- self.on_incoming_packets(list(new_packets), timestamp=time(), source=u"resumed")
+ yield self.on_incoming_packets(list(new_packets), timestamp=time(), source=u"resumed")
def _remove_delayed(self, delayed):
for key in self._delayed_value[delayed]:
@@ -1976,15 +2014,17 @@ def _remove_delayed(self, delayed):
del self._delayed_value[delayed]
+ @inlineCallbacks
def _periodically_clean_delayed(self):
now = time()
for delayed in self._delayed_value.keys():
if now > delayed.timestamp + 10:
self._remove_delayed(delayed)
- delayed.on_timeout()
+ yield delayed.on_timeout()
self._statistics.increase_delay_msg_count(u"timeout")
self._statistics.increase_msg_count(u"drop", u"delay_timeout:%s" % delayed)
+ @inlineCallbacks
def on_incoming_packets(self, packets, cache=True, timestamp=0.0, source=u"unknown"):
"""
Process incoming packets for this community.
@@ -2020,7 +2060,7 @@ def on_incoming_packets(self, packets, cache=True, timestamp=0.0, source=u"unkno
self._logger.debug("new cache with %d %s messages (batch window: %d)",
len(batch), meta.name, meta.batch.max_window)
else:
- self._on_batch_cache(meta, batch)
+ yield self._on_batch_cache(meta, batch)
self._statistics.increase_total_received_count(len(cur_packets))
@@ -2032,6 +2072,7 @@ def on_incoming_packets(self, packets, cache=True, timestamp=0.0, source=u"unkno
self._statistics.increase_msg_count(
u"drop", u"convert_packets_into_batch:unknown conversion", len(cur_packets))
+ @inlineCallbacks
def _process_message_batch(self, meta):
"""
Start processing a batch of messages.
@@ -2048,8 +2089,10 @@ def _process_message_batch(self, meta):
self.cancel_pending_task(meta)
self._logger.debug("processing %sx %s batched messages", len(batch), meta.name)
- return self._on_batch_cache(meta, batch)
+ result = yield self._on_batch_cache(meta, batch)
+ returnValue(result)
+ @inlineCallbacks
def _on_batch_cache(self, meta, batch):
"""
Start processing a batch of messages.
@@ -2075,22 +2118,24 @@ def _on_batch_cache(self, meta, batch):
assert isinstance(candidate, Candidate)
assert isinstance(packet, str)
assert isinstance(conversion, Conversion)
+
try:
# convert binary data to internal Message
- messages.append(conversion.decode_message(candidate, packet, source=source))
+ decoded_message = yield conversion.decode_message(candidate, packet, source=source)
+ messages.append(decoded_message)
except DropPacket as drop:
self._drop(drop, packet, candidate)
except DelayPacket as delay:
- self._dispersy._delay(delay, packet, candidate)
+ yield self._dispersy._delay(delay, packet, candidate)
assert all(isinstance(message, Message.Implementation) for message in messages), "convert_batch_into_messages must return only Message.Implementation instances"
assert all(message.meta == meta for message in messages), "All Message.Implementation instances must be in the same batch"
# handle the incoming messages
if messages:
- self.on_messages(messages)
+ yield self.on_messages(messages)
def purge_batch_cache(self):
"""
@@ -2101,6 +2146,7 @@ def purge_batch_cache(self):
self.cancel_pending_task(meta)
self._batch_cache.clear()
+ @inlineCallbacks
def flush_batch_cache(self):
"""
Process all pending batches with a sync distribution.
@@ -2111,8 +2157,9 @@ def flush_batch_cache(self):
for meta, (_, batch) in flush_list:
self._logger.debug("flush cached %dx %s messages (dc: %s)",
len(batch), meta.name, self._pending_tasks[meta])
- self._process_message_batch(meta)
+ yield self._process_message_batch(meta)
+ @inlineCallbacks
def on_messages(self, messages):
"""
Process one batch of messages.
@@ -2141,14 +2188,17 @@ def on_messages(self, messages):
assert all(message.community == messages[0].community for message in messages)
assert all(message.meta == messages[0].meta for message in messages)
+ self._logger.debug("Community on_messag received %s messages", len(messages))
+
+ @inlineCallbacks
def _filter_fail(message):
if isinstance(message, DelayMessage):
- self._dispersy._delay(message, message.delayed.packet, message.delayed.candidate)
- return False
+ yield self._dispersy._delay(message, message.delayed.packet, message.delayed.candidate)
+ returnValue(False)
elif isinstance(message, DropMessage):
self._drop(message, message.dropped.packet, message.dropped.candidate)
- return False
- return True
+ returnValue(False)
+ returnValue(True)
meta = messages[0].meta
debug_count = len(messages)
@@ -2156,23 +2206,31 @@ def _filter_fail(message):
# drop all duplicate or old messages
assert type(meta.distribution) in self._dispersy._check_distribution_batch_map
- messages = list(self._dispersy._check_distribution_batch_map[type(meta.distribution)](messages))
+ messages = yield self._dispersy._check_distribution_batch_map[type(meta.distribution)](messages)
+ messages = list(messages)
# TODO(emilon): This seems iffy
assert len(messages) > 0 # should return at least one item for each message
assert all(isinstance(message, (Message.Implementation, DropMessage, DelayMessage)) for message in messages)
# handle/remove DropMessage and DelayMessage instances
- messages = [message for message in messages if _filter_fail(message)]
+ tmp_messages = []
+ for message in messages:
+ filter_fail_result = yield _filter_fail(message)
+ if filter_fail_result:
+ tmp_messages.append(message)
+ messages = tmp_messages
+
if not messages:
- return 0
+ returnValue(0)
# check all remaining messages on the community side. may yield Message.Implementation,
# DropMessage, and DelayMessage instances
try:
- possibly_messages = list(meta.check_callback(messages))
- except:
+ possibly_messages_iter = yield meta.check_callback(messages)
+ possibly_messages = list(possibly_messages_iter)
+ except Exception:
self._logger.exception("exception during check_callback for %s", meta.name)
- return 0
+ returnValue(0)
# TODO(emilon): fixh _disp_check_modification in channel/community.py (tribler) so we can make a proper assert out of this.
assert len(possibly_messages) >= 0 # may return zero messages
assert all(isinstance(message, (Message.Implementation, DropMessage, DelayMessage, DispersyInternalMessage)) for message in possibly_messages), possibly_messages
@@ -2185,9 +2243,15 @@ def _filter_fail(message):
meta.check_callback)
# handle/remove DropMessage and DelayMessage instances
- possibly_messages = [message for message in possibly_messages if _filter_fail(message)]
+ tmp_possibly_messages = []
+ for message in possibly_messages:
+ filter_fail_result = yield _filter_fail(message)
+ if filter_fail_result:
+ tmp_possibly_messages.append(message)
+ possibly_messages = tmp_possibly_messages
+
if not possibly_messages:
- return 0
+ returnValue(0)
other = []
messages = []
@@ -2204,7 +2268,9 @@ def _filter_fail(message):
if isinstance(message, Message.Implementation))))
# store to disk and update locally
- if self._dispersy.store_update_forward(possibly_messages, True, True, False):
+ result = yield self._dispersy.store_update_forward(possibly_messages, True, True, False)
+ self._logger.debug("Community on_messages, result of store_update_foward %s", result)
+ if result:
self._statistics.increase_msg_count(u"success", meta.name, len(messages))
if meta.name == u"dispersy-introduction-response":
@@ -2224,12 +2290,12 @@ def _filter_fail(message):
len(messages), debug_count, (debug_end - debug_begin),
meta.name, meta.batch.max_window)
- self._resume_delayed(meta, messages)
+ yield self._resume_delayed(meta, messages)
# return the number of messages that were correctly handled (non delay, duplicates, etc)
- return len(messages)
+ returnValue(len(messages))
- return 0
+ returnValue(0)
def on_identity(self, messages):
"""
@@ -2243,6 +2309,7 @@ def on_identity(self, messages):
if self.is_pending_task_active("download master member identity"):
self.cancel_pending_task("download master member identity")
+ @inlineCallbacks
def create_signature_request(self, candidate, message, response_func, response_args=(), timeout=10.0, forward=True):
"""
Create a dispersy-signature-request message.
@@ -2306,13 +2373,13 @@ def create_signature_request(self, candidate, message, response_func, response_a
# the dispersy-signature-request message that will hold the
# message that should obtain more signatures
meta = self.get_meta_message(u"dispersy-signature-request")
- cache.request = meta.impl(distribution=(self.global_time,),
+ cache.request = yield meta.impl(distribution=(self.global_time,),
destination=(candidate,),
payload=(cache.number, message))
self._logger.debug("asking %s", [member.mid.encode("HEX") for member in members])
- self._dispersy._forward([cache.request])
- return cache
+ yield self._dispersy._forward([cache.request])
+ returnValue(cache)
def check_signature_request(self, messages):
assert isinstance(messages[0].meta.authentication, NoAuthentication)
@@ -2329,6 +2396,7 @@ def check_signature_request(self, messages):
else:
yield DropMessage(message, "Nothing to sign")
+ @inlineCallbacks
def on_signature_request(self, messages):
"""
We received a dispersy-signature-request message.
@@ -2361,15 +2429,16 @@ def on_signature_request(self, messages):
assert isinstance(message.payload.message.authentication, DoubleMemberAuthentication.Implementation), type(message.payload.message.authentication)
# the community must allow this signature
- new_submsg = message.payload.message.authentication.allow_signature_func(message.payload.message)
+ new_submsg = yield message.payload.message.authentication.allow_signature_func(message.payload.message)
assert new_submsg is None or isinstance(new_submsg, Message.Implementation), type(new_submsg)
if new_submsg:
- responses.append(meta.impl(distribution=(self.global_time,),
+ created_impl = yield meta.impl(distribution=(self.global_time,),
destination=(message.candidate,),
- payload=(message.payload.identifier, new_submsg)))
+ payload=(message.payload.identifier, new_submsg))
+ responses.append(created_impl)
if responses:
- self.dispersy._forward(responses)
+ yield self.dispersy._forward(responses)
def check_signature_response(self, messages):
identifiers_seen = {}
@@ -2407,6 +2476,7 @@ def check_signature_response(self, messages):
identifiers_seen[message.payload.identifier] = message.candidate
yield message
+ @inlineCallbacks
def on_signature_response(self, messages):
"""
Handle one or more dispersy-signature-response messages.
@@ -2436,11 +2506,11 @@ def on_signature_response(self, messages):
for signature, member in new_submsg.authentication.signed_members:
if not signature and member == self._my_member:
new_submsg.authentication.sign(new_body)
- new_submsg.regenerate_packet()
+ yield new_submsg.regenerate_packet()
break
assert new_submsg.authentication.is_signed
- self.dispersy.store_update_forward([new_submsg], True, True, True)
+ yield self.dispersy.store_update_forward([new_submsg], True, True, True)
def check_introduction_request(self, messages):
"""
@@ -2454,6 +2524,7 @@ def check_introduction_request(self, messages):
yield message
+ @inlineCallbacks
def on_introduction_request(self, messages, extra_payload=None):
assert not extra_payload or isinstance(extra_payload, list), 'extra_payload is not a list %s' % type(extra_payload)
@@ -2502,10 +2573,12 @@ def on_introduction_request(self, messages, extra_payload=None):
introduction_args_list = tuple(introduction_args_list)
# create introduction response
- responses.append(meta_introduction_response.impl(authentication=(self.my_member,), distribution=(self.global_time,), destination=(candidate,), payload=introduction_args_list))
+ created_impl = yield meta_introduction_response.impl(authentication=(self.my_member,), distribution=(self.global_time,), destination=(candidate,), payload=introduction_args_list)
+ responses.append(created_impl)
# create puncture request
- requests.append(meta_puncture_request.impl(distribution=(self.global_time,), destination=(introduced,), payload=(payload.source_lan_address, payload.source_wan_address, payload.identifier)))
+ created_impl = yield meta_puncture_request.impl(distribution=(self.global_time,), destination=(introduced,), payload=(payload.source_lan_address, payload.source_wan_address, payload.identifier))
+ requests.append(created_impl)
else:
self._logger.debug("responding to %s without an introduction %s", candidate, type(self))
@@ -2517,12 +2590,13 @@ def on_introduction_request(self, messages, extra_payload=None):
introduction_args_list += extra_payload
introduction_args_list = tuple(introduction_args_list)
- responses.append(meta_introduction_response.impl(authentication=(self.my_member,), distribution=(self.global_time,), destination=(candidate,), payload=introduction_args_list))
+ created_impl = yield meta_introduction_response.impl(authentication=(self.my_member,), distribution=(self.global_time,), destination=(candidate,), payload=introduction_args_list)
+ responses.append(created_impl)
if responses:
- self._dispersy._forward(responses)
+ yield self._dispersy._forward(responses)
if requests:
- self._dispersy._forward(requests)
+ yield self._dispersy._forward(requests)
#
# process the bloom filter part of the request
@@ -2549,7 +2623,8 @@ def on_introduction_request(self, messages, extra_payload=None):
messages_with_sync.append((message, time_low, time_high, offset, modulo))
if messages_with_sync:
- for message, generator in self._get_packets_for_bloomfilters(messages_with_sync, include_inactive=False):
+ bloomfilter_packets = yield self._get_packets_for_bloomfilters(messages_with_sync, include_inactive=False)
+ for message, generator in bloomfilter_packets:
payload = message.payload
# we limit the response by byte_limit bytes
byte_limit = self.dispersy_sync_response_limit
@@ -2565,20 +2640,21 @@ def on_introduction_request(self, messages, extra_payload=None):
if packets:
self._logger.debug("syncing %d packets (%d bytes) to %s",
len(packets), sum(len(packet) for packet in packets), message.candidate)
- self._dispersy._send_packets([message.candidate], packets, self, "-caused by sync-")
+ yield self._dispersy._send_packets([message.candidate], packets, self, "-caused by sync-")
def check_introduction_response(self, messages):
identifiers_seen = {}
for message in messages:
if not self.request_cache.has(u"introduction-request", message.payload.identifier):
self._dispersy._statistics.invalid_response_identifier_count += 1
- yield DropMessage(message, "invalid response identifier")
+ yield DropMessage(message, "invalid response identifier: request_cache does not have this request")
continue
if message.payload.identifier in identifiers_seen:
- self._logger.error("already seen this indentifier in this batch, previous candidate %s this one %s", identifiers_seen[message.payload.identifier], message.candidate)
+ self._logger.error("already seen this indentifier in this batch, previous candidate %s this one %s",
+ identifiers_seen[message.payload.identifier], message.candidate)
self._dispersy._statistics.invalid_response_identifier_count += 1
- yield DropMessage(message, "invalid response identifier")
+ yield DropMessage(message, "invalid response identifier: identifier already seen")
continue
# check introduced LAN address, if given
@@ -2653,6 +2729,7 @@ def on_introduction_response(self, messages):
if self._dispersy._statistics.received_introductions is not None:
self._dispersy._statistics.received_introductions[candidate.sock_addr]['-ignored-'] += 1
+ @inlineCallbacks
def create_introduction_request(self, destination, allow_sync, forward=True, is_fast_walker=False, extra_payload=None):
assert isinstance(destination, WalkCandidate), [type(destination), destination]
assert not extra_payload or isinstance(extra_payload, list), 'extra_payload is not a list %s' % type(extra_payload)
@@ -2671,8 +2748,8 @@ def create_introduction_request(self, destination, allow_sync, forward=True, is_
else:
# flush any sync-able items left in the cache before we create a sync
- self.flush_batch_cache()
- sync = self.dispersy_claim_sync_bloom_filter(cache)
+ yield self.flush_batch_cache()
+ sync = yield self.dispersy_claim_sync_bloom_filter(cache)
if __debug__:
assert sync is None or isinstance(sync, tuple), sync
if not sync is None:
@@ -2686,7 +2763,8 @@ def create_introduction_request(self, destination, allow_sync, forward=True, is_
# verify that the bloom filter is correct
try:
- _, packets = self._get_packets_for_bloomfilters([[None, time_low, self.global_time if time_high == 0 else time_high, offset, modulo]], include_inactive=True).next()
+ bloomfilter_packets = yield self._get_packets_for_bloomfilters([[None, time_low, self.global_time if time_high == 0 else time_high, offset, modulo]], include_inactive=True)
+ _, packets = bloomfilter_packets.next()
packets = [packet for packet, in packets]
except OverflowError:
@@ -2720,7 +2798,7 @@ def create_introduction_request(self, destination, allow_sync, forward=True, is_
args_list = tuple(args_list)
meta_request = self.get_meta_message(u"dispersy-introduction-request")
- request = meta_request.impl(authentication=(self.my_member,),
+ request = yield meta_request.impl(authentication=(self.my_member,),
distribution=(self.global_time,),
destination=(destination,),
payload=args_list)
@@ -2735,10 +2813,11 @@ def create_introduction_request(self, destination, allow_sync, forward=True, is_
self._logger.debug("%s %s sending introduction request to %s",
self.cid.encode("HEX"), type(self), destination)
- self._dispersy._forward([request])
+ yield self._dispersy._forward([request])
- return request
+ returnValue(request)
+ @inlineCallbacks
def _get_packets_for_bloomfilters(self, requests, include_inactive=True):
"""
Return all packets matching a Bloomfilter request
@@ -2793,6 +2872,7 @@ def get_sub_select(meta):
sql = "".join((u"SELECT * FROM (", " UNION ALL ".join(get_sub_select(meta) for meta in meta_messages), ")"))
self._logger.debug(sql)
+ return_messages = []
for message, time_low, time_high, offset, modulo in requests:
sql_arguments = []
for meta in meta_messages:
@@ -2804,7 +2884,10 @@ def get_sub_select(meta):
sql_arguments.extend((meta.database_id, _time_low, time_high, offset, modulo))
self._logger.debug("%s", sql_arguments)
- yield message, ((str(packet),) for packet, in self._dispersy._database.execute(sql, sql_arguments))
+ db_res = yield self._dispersy.database.stormdb.fetchall(sql, sql_arguments)
+ return_messages.append((message, ((str(packet),) for packet, in db_res)))
+
+ returnValue(iter(return_messages))
def check_puncture_request(self, messages):
for message in messages:
@@ -2834,6 +2917,7 @@ def check_puncture_request(self, messages):
yield message
+ @inlineCallbacks
def on_puncture_request(self, messages):
meta_puncture = self.get_meta_message(u"dispersy-puncture")
punctures = []
@@ -2852,10 +2936,11 @@ def on_puncture_request(self, messages):
tunnel = False
candidate = Candidate(sock_addr, tunnel)
- punctures.append(meta_puncture.impl(authentication=(self.my_member,), distribution=(self.global_time,), destination=(candidate,), payload=(self._dispersy._lan_address, self._dispersy._wan_address, message.payload.identifier)))
+ created_impl = yield meta_puncture.impl(authentication=(self.my_member,), distribution=(self.global_time,), destination=(candidate,), payload=(self._dispersy._lan_address, self._dispersy._wan_address, message.payload.identifier))
+ punctures.append(created_impl)
self._logger.debug("%s asked us to send a puncture to %s", message.candidate, candidate)
- self._dispersy._forward(punctures)
+ yield self._dispersy._forward(punctures)
def check_puncture(self, messages):
identifiers_seen = {}
@@ -2886,11 +2971,13 @@ def on_puncture(self, messages):
self._logger.debug("received punture from %s", candidate)
cache.puncture_candidate = candidate
+ @inlineCallbacks
def create_missing_message(self, candidate, member, global_time):
meta = self.get_meta_message(u"dispersy-missing-message")
- request = meta.impl(distribution=(self.global_time,), destination=(candidate,), payload=(member, [global_time]))
- self._dispersy._forward([request])
+ request = yield meta.impl(distribution=(self.global_time,), destination=(candidate,), payload=(member, [global_time]))
+ yield self._dispersy._forward([request])
+ @inlineCallbacks
def on_missing_message(self, messages):
for message in messages:
@@ -2899,18 +2986,19 @@ def on_missing_message(self, messages):
member_database_id = message.payload.member.database_id
for global_time in message.payload.global_times:
try:
- packet, = self._dispersy._database.execute(u"SELECT packet FROM sync WHERE community = ? AND member = ? AND global_time = ?",
- (self.database_id, member_database_id, global_time)).next()
+ packet, = yield self._dispersy.database.stormdb.fetchone(u"SELECT packet FROM sync WHERE community = ? AND member = ? AND global_time = ?",
+ (self.database_id, member_database_id, global_time))
responses.append(str(packet))
- except StopIteration:
+ except TypeError:
pass
if responses:
- self._dispersy._send_packets([candidate], responses, self, "-caused by missing-message-")
+ yield self._dispersy._send_packets([candidate], responses, self, "-caused by missing-message-")
else:
self._logger.warning('could not find missing messages for candidate %s, global_times %s',
candidate, message.payload.global_times)
+ @inlineCallbacks
def create_identity(self, sign_with_master=False, store=True, update=True):
"""
Create a dispersy-identity message for self.my_member.
@@ -2935,20 +3023,21 @@ def create_identity(self, sign_with_master=False, store=True, update=True):
#
# as a security feature we force that the global time on dispersy-identity messages are
# always 2 or higher (except for master members who should get global time 1)
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
while global_time < 2:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- message = meta.impl(authentication=(self.master_member if sign_with_master else self.my_member,),
+ message = yield meta.impl(authentication=(self.master_member if sign_with_master else self.my_member,),
distribution=(global_time,))
- self._dispersy.store_update_forward([message], store, update, False)
+ yield self._dispersy.store_update_forward([message], store, update, False)
# indicate that we have the identity message
if sign_with_master:
self.master_member.add_identity(self)
else:
self.my_member.add_identity(self)
- return message
+ returnValue(message)
+ @inlineCallbacks
def create_missing_identity(self, candidate, dummy_member):
"""
Create a dispersy-missing-identity message.
@@ -2962,9 +3051,10 @@ def create_missing_identity(self, candidate, dummy_member):
assert isinstance(dummy_member, DummyMember)
meta = self.get_meta_message(u"dispersy-missing-identity")
- request = meta.impl(distribution=(self.global_time,), destination=(candidate,), payload=(dummy_member.mid,))
- self._dispersy._forward([request])
+ request = yield meta.impl(distribution=(self.global_time,), destination=(candidate,), payload=(dummy_member.mid,))
+ yield self._dispersy._forward([request])
+ @inlineCallbacks
def on_missing_identity(self, messages):
"""
We received dispersy-missing-identity messages.
@@ -2985,13 +3075,15 @@ def on_missing_identity(self, messages):
mid = message.payload.mid
# we are assuming that no more than 10 members have the same sha1 digest.
- for member_id in [member_id for member_id, in self._dispersy._database.execute(sql_member, (buffer(mid),))]:
- packets = [str(packet) for packet, in self._dispersy._database.execute(sql_packet,
- (self.database_id, member_id, meta_id))]
+ sql_members = yield self._dispersy.database.stormdb.fetchall(sql_member, (buffer(mid),))
+ for member_id in [member_id for member_id, in sql_members]:
+ sql_packets = yield self._dispersy.database.stormdb.fetchall(sql_packet,
+ (self.database_id, member_id, meta_id))
+ packets = [str(packet) for packet, in sql_packets]
if packets:
self._logger.debug("responding with %d identity messages", len(packets))
- self._dispersy._send_packets([message.candidate], packets, self, "-caused by missing-identity-")
+ yield self._dispersy._send_packets([message.candidate], packets, self, "-caused by missing-identity-")
else:
assert not message.payload.mid == self.my_member.mid, "we should always have our own dispersy-identity"
@@ -2999,11 +3091,13 @@ def on_missing_identity(self, messages):
" no response is sent [%s, mid:%s, cid:%s]",
mid.encode("HEX"), self.my_member.mid.encode("HEX"), self.cid.encode("HEX"))
+ @inlineCallbacks
def create_missing_sequence(self, candidate, member, message, missing_low, missing_high):
meta = self.get_meta_message(u"dispersy-missing-sequence")
- request = meta.impl(distribution=(self.global_time,), destination=(candidate,), payload=(member, message, missing_low, missing_high))
- self._dispersy._forward([request])
+ request = yield meta.impl(distribution=(self.global_time,), destination=(candidate,), payload=(member, message, missing_low, missing_high))
+ yield self._dispersy._forward([request])
+ @inlineCallbacks
def on_missing_sequence(self, messages):
"""
We received a dispersy-missing-sequence message.
@@ -3042,6 +3136,8 @@ def merge_ranges(ranges):
cur_low, cur_high = low, high
yield (cur_low, cur_high)
+ @inlineCallbacks
+ # TODO(Laurens): member_id and message_id are not used.
def fetch_packets(member_id, message_id, candidate, requests):
# We limit the response by byte_limit bytes per incoming candidate
byte_limit = self.dispersy_missing_sequence_response_limit
@@ -3055,19 +3151,20 @@ def fetch_packets(member_id, message_id, candidate, requests):
self._logger.debug("fetching member:%d message:%d packets from database for %s",
member_id, message_id, candidate)
for range_min, range_max in merge_ranges(sequences):
- for packet, in self._dispersy._database.execute(
+ sync_packets = yield self._dispersy.database.stormdb.fetchall(
u"SELECT packet FROM sync "
u"WHERE member = ? AND meta_message = ? AND sequence BETWEEN ? AND ? "
u"ORDER BY sequence",
- (member_id, message_id, range_min, range_max)):
+ (member_id, message_id, range_min, range_max))
+ for packet, in sync_packets:
packet = str(packet)
packets.append(packet)
byte_limit -= len(packet)
if byte_limit <= 0:
self._logger.debug("Bandwidth throttle. byte_limit:%d", byte_limit)
- return packets
- return packets
+ returnValue(packets)
+ returnValue(packets)
sources = defaultdict(lambda: defaultdict(list))
for message in messages:
@@ -3081,11 +3178,11 @@ def fetch_packets(member_id, message_id, candidate, requests):
for candidate, member_message_requests in sources.iteritems():
assert isinstance(candidate, Candidate), type(candidate)
- packets = fetch_packets(member_id, message_id, candidate, member_message_requests)
+ packets = yield fetch_packets(member_id, message_id, candidate, member_message_requests)
if __debug__:
# ensure we are sending the correct sequence numbers back
for packet in packets:
- msg = self._dispersy.convert_packet_to_message(packet, self)
+ msg = yield self._dispersy.convert_packet_to_message(packet, self)
assert msg
self._logger.debug("syncing %d bytes, member:%d message:%d sequence:%d to %s",
len(packet),
@@ -3094,34 +3191,37 @@ def fetch_packets(member_id, message_id, candidate, requests):
msg.distribution.sequence_number,
candidate)
- self._dispersy._send_packets([candidate], packets, self, u"-sequence-")
+ yield self._dispersy._send_packets([candidate], packets, self, u"-sequence-")
+ @inlineCallbacks
def create_missing_proof(self, candidate, message):
meta = self.get_meta_message(u"dispersy-missing-proof")
- request = meta.impl(distribution=(self.global_time,), destination=(candidate,), payload=(message.authentication.member, message.distribution.global_time))
- self._dispersy._forward([request])
+ request = yield meta.impl(distribution=(self.global_time,), destination=(candidate,), payload=(message.authentication.member, message.distribution.global_time))
+ yield self._dispersy._forward([request])
+ @inlineCallbacks
def on_missing_proof(self, messages):
for message in messages:
try:
- packet, = self._dispersy._database.execute(u"SELECT packet FROM sync WHERE community = ? AND member = ? AND global_time = ? LIMIT 1",
- (self.database_id, message.payload.member.database_id, message.payload.global_time)).next()
+ packet, = yield self._dispersy.database.stormdb.fetchone(u"SELECT packet FROM sync WHERE community = ? AND member = ? AND global_time = ? LIMIT 1",
+ (self.database_id, message.payload.member.database_id, message.payload.global_time))
- except StopIteration:
+ except TypeError:
self._logger.warning("someone asked for proof for a message that we do not have")
else:
packet = str(packet)
- msg = self._dispersy.convert_packet_to_message(packet, self, verify=False)
+ msg = yield self._dispersy.convert_packet_to_message(packet, self, verify=False)
allowed, proofs = self.timeline.check(msg)
if allowed and proofs:
self._logger.debug("we found %d packets containing proof for %s", len(proofs), message.candidate)
- self._dispersy._send_packets([message.candidate], [proof.packet for proof in proofs], self, "-caused by missing-proof-")
+ yield self._dispersy._send_packets([message.candidate], [proof.packet for proof in proofs], self, "-caused by missing-proof-")
else:
self._logger.debug("unable to give %s missing proof. allowed:%s. proofs:%d packets",
message.candidate, allowed, len(proofs))
+ @inlineCallbacks
def create_authorize(self, permission_triplets, sign_with_master=False, store=True, update=True, forward=True):
"""
Grant permissions to members in a self.
@@ -3174,12 +3274,17 @@ def create_authorize(self, permission_triplets, sign_with_master=False, store=Tr
assert triplet[2] in (u"permit", u"authorize", u"revoke", u"undo")
meta = self.get_meta_message(u"dispersy-authorize")
- message = meta.impl(authentication=((self.master_member if sign_with_master else self.my_member),),
- distribution=(self.claim_global_time(), self._claim_master_member_sequence_number(meta) if sign_with_master else meta.distribution.claim_sequence_number()),
+ claimed_global_time = yield self.claim_global_time()
+ if sign_with_master:
+ sequence_number = yield self._claim_master_member_sequence_number(meta)
+ else:
+ sequence_number = meta.distribution.claim_sequence_number()
+ message = yield meta.impl(authentication=((self.master_member if sign_with_master else self.my_member),),
+ distribution=(claimed_global_time, sequence_number),
payload=(permission_triplets,))
- self._dispersy.store_update_forward([message], store, update, forward)
- return message
+ yield self._dispersy.store_update_forward([message], store, update, forward)
+ returnValue(message)
def on_authorize(self, messages, initializing=False):
"""
@@ -3195,9 +3300,11 @@ def on_authorize(self, messages, initializing=False):
@todo: We should raise a DelayMessageByProof to ensure that we request the proof for this
message immediately.
"""
+
for message in messages:
self.timeline.authorize(message.authentication.member, message.distribution.global_time, message.payload.permission_triplets, message)
+ @inlineCallbacks
def create_revoke(self, permission_triplets, sign_with_master=False, store=True, update=True, forward=True):
"""
Revoke permissions from a members in a community.
@@ -3249,13 +3356,19 @@ def create_revoke(self, permission_triplets, sign_with_master=False, store=True,
assert triplet[2] in (u"permit", u"authorize", u"revoke", u"undo")
meta = self.get_meta_message(u"dispersy-revoke")
- message = meta.impl(authentication=((self.master_member if sign_with_master else self.my_member),),
- distribution=(self.claim_global_time(), self._claim_master_member_sequence_number(meta) if sign_with_master else meta.distribution.claim_sequence_number()),
+ claimed_global_time = yield self.claim_global_time()
+ if sign_with_master:
+ sequence_number = yield self._claim_master_member_sequence_number(meta)
+ else:
+ sequence_number = meta.distribution.claim_sequence_number()
+ message = yield meta.impl(authentication=((self.master_member if sign_with_master else self.my_member),),
+ distribution=(claimed_global_time, sequence_number),
payload=(permission_triplets,))
- self._dispersy.store_update_forward([message], store, update, forward)
- return message
+ yield self._dispersy.store_update_forward([message], store, update, forward)
+ returnValue(message)
+ @inlineCallbacks
def on_revoke(self, messages, initializing=False):
"""
Process a dispersy-revoke message.
@@ -3281,8 +3394,9 @@ def on_revoke(self, messages, initializing=False):
if not initializing:
for meta, globaltime_range in changes.iteritems():
- self._update_timerange(meta, globaltime_range[0], globaltime_range[1])
+ yield self._update_timerange(meta, globaltime_range[0], globaltime_range[1])
+ @inlineCallbacks
def create_undo(self, message, sign_with_master=False, store=True, update=True, forward=True):
"""
Create a dispersy-undo-own or dispersy-undo-other message to undo MESSAGE.
@@ -3309,12 +3423,12 @@ def create_undo(self, message, sign_with_master=False, store=True, update=True,
# infinate data traffic). nodes that notice this behavior must blacklist the offending
# node. hence we ensure that we did not send an undo before
try:
- undone, = self._dispersy._database.execute(u"SELECT undone FROM sync WHERE community = ? AND member = ? AND global_time = ?",
- (self.database_id, message.authentication.member.database_id, message.distribution.global_time)).next()
+ undone, = yield self._dispersy.database.stormdb.fetchone(u"SELECT undone FROM sync WHERE community = ? AND member = ? AND global_time = ?",
+ (self.database_id, message.authentication.member.database_id, message.distribution.global_time))
- except StopIteration:
+ except TypeError:
+ # TODO(Laurens): This can probably refactored to be more nice? maybe raise something.
assert False, "The message that we want to undo does not exist. Programming error"
- return None
else:
if undone:
@@ -3323,13 +3437,18 @@ def create_undo(self, message, sign_with_master=False, store=True, update=True,
"trying to return the previous undo message")
undo_own_meta = self.get_meta_message(u"dispersy-undo-own")
undo_other_meta = self.get_meta_message(u"dispersy-undo-other")
- for packet_id, message_id, packet in self._dispersy._database.execute(
+ sync_packets = yield self._dispersy.database.stormdb.fetchall(
u"SELECT id, meta_message, packet FROM sync WHERE community = ? AND member = ? AND meta_message IN (?, ?)",
- (self.database_id, message.authentication.member.database_id, undo_own_meta.database_id, undo_other_meta.database_id)):
+ (self.database_id, message.authentication.member.database_id, undo_own_meta.database_id, undo_other_meta.database_id))
+ for packet_id, message_id, packet in sync_packets:
self._logger.debug("checking: %s", message_id)
- msg = Packet(undo_own_meta if undo_own_meta.database_id == message_id else undo_other_meta, str(packet), packet_id).load_message()
+ if undo_own_meta.database_id == message_id:
+ p = Packet(undo_own_meta, str(packet), packet_id)
+ else:
+ p = Packet(undo_other_meta, str(packet), packet_id)
+ msg = yield p.load_message()
if message.distribution.global_time == msg.payload.global_time:
- return msg
+ returnValue(msg)
# TODO(emilon): Review this statement
# Could not find the undo message that caused the sync.undone to be True. The undone was probably
@@ -3340,8 +3459,13 @@ def create_undo(self, message, sign_with_master=False, store=True, update=True,
else:
# create the undo message
meta = self.get_meta_message(u"dispersy-undo-own" if self.my_member == message.authentication.member and not sign_with_master else u"dispersy-undo-other")
- msg = meta.impl(authentication=((self.master_member if sign_with_master else self.my_member),),
- distribution=(self.claim_global_time(), self._claim_master_member_sequence_number(meta) if sign_with_master else meta.distribution.claim_sequence_number()),
+ claimed_global_time = yield self.claim_global_time()
+ if sign_with_master:
+ sequence_number = yield self._claim_master_member_sequence_number(meta)
+ else:
+ sequence_number = meta.distribution.claim_sequence_number()
+ msg = yield meta.impl(authentication=((self.master_member if sign_with_master else self.my_member),),
+ distribution=(claimed_global_time, sequence_number),
payload=(message.authentication.member, message.distribution.global_time, message))
if __debug__:
@@ -3349,9 +3473,10 @@ def create_undo(self, message, sign_with_master=False, store=True, update=True,
allowed, _ = self.timeline.check(msg)
assert allowed, "create_undo was called without having the permission to undo"
- self._dispersy.store_update_forward([msg], store, update, forward)
- return msg
+ yield self._dispersy.store_update_forward([msg], store, update, forward)
+ returnValue(msg)
+ @inlineCallbacks
def check_undo(self, messages):
# Note: previously all MESSAGES have been checked to ensure that the sequence numbers are
# correct. this check takes into account the messages in the batch. hence, if one of these
@@ -3362,16 +3487,17 @@ def check_undo(self, messages):
dependencies = {}
+ return_list = []
for message in messages:
if message.payload.packet is None:
# obtain the packet that we are attempting to undo
try:
- packet_id, message_name, packet_data = self._dispersy._database.execute(u"SELECT sync.id, meta_message.name, sync.packet FROM sync JOIN meta_message ON meta_message.id = sync.meta_message WHERE sync.community = ? AND sync.member = ? AND sync.global_time = ?",
- (self.database_id, message.payload.member.database_id, message.payload.global_time)).next()
- except StopIteration:
+ packet_id, message_name, packet_data = yield self._dispersy.database.stormdb.fetchone(u"SELECT sync.id, meta_message.name, sync.packet FROM sync JOIN meta_message ON meta_message.id = sync.meta_message WHERE sync.community = ? AND sync.member = ? AND sync.global_time = ?",
+ (self.database_id, message.payload.member.database_id, message.payload.global_time))
+ except TypeError:
delay = DelayMessageByMissingMessage(message, message.payload.member, message.payload.global_time)
dependencies[message.authentication.member.public_key] = (message.distribution.sequence_number, delay)
- yield delay
+ return_list.append(delay)
continue
message.payload.packet = Packet(self.get_meta_message(message_name), str(packet_data), packet_id)
@@ -3380,7 +3506,7 @@ def check_undo(self, messages):
if not message.payload.packet.meta.undo_callback:
drop = DropMessage(message, "message does not allow undo")
dependencies[message.authentication.member.public_key] = (message.distribution.sequence_number, drop)
- yield drop
+ return_list.append(drop)
continue
# check the timeline
@@ -3388,7 +3514,7 @@ def check_undo(self, messages):
if not allowed:
delay = DelayMessageByProof(message)
dependencies[message.authentication.member.public_key] = (message.distribution.sequence_number, delay)
- yield delay
+ return_list.append(delay)
continue
# check batch dependencies
@@ -3399,24 +3525,25 @@ def check_undo(self, messages):
# MESSAGE gets the same consequence as the previous message
self._logger.debug("apply same consequence on later message (%s on #%d applies to #%d)",
consequence, sequence_number, message.distribution.sequence_number)
- yield consequence.duplicate(message)
+ return_list.append(consequence.duplicate(message))
continue
try:
- undone, = self._dispersy._database.execute(u"SELECT undone FROM sync WHERE id = ?", (message.payload.packet.packet_id,)).next()
- except StopIteration:
+ undone, = yield self._dispersy.database.stormdb.fetchone(u"SELECT undone FROM sync WHERE id = ?", (message.payload.packet.packet_id,))
+ except TypeError:
+ # TODO(Laurens): This can probably refactored to be more nice? maybe raise something.
assert False, "The conversion ensures that the packet exists in the DB. Hence this should never occur"
- undone = 0
if undone and message.name == u"dispersy-undo-own":
# look for other packets we received that undid this packet
member = message.authentication.member
undo_own_meta = self.get_meta_message(u"dispersy-undo-own")
- for packet_id, packet in self._dispersy._database.execute(
+ sync_packets = yield self._dispersy.database.stormdb.fetchall(
u"SELECT id, packet FROM sync WHERE community = ? AND member = ? AND meta_message = ?",
- (self.database_id, member.database_id, undo_own_meta.database_id)):
-
- db_msg = Packet(undo_own_meta, str(packet), packet_id).load_message()
+ (self.database_id, member.database_id, undo_own_meta.database_id))
+ for packet_id, packet in sync_packets:
+ p = Packet(undo_own_meta, str(packet), packet_id)
+ db_msg = yield p.load_message()
if message.payload.global_time == db_msg.payload.global_time:
# we've found another packet which undid this packet
if member == self.my_member:
@@ -3427,29 +3554,31 @@ def check_undo(self, messages):
# Reply to this peer with a higher (or equally) ranked message in case we have one
if db_msg.packet <= message.packet:
message.payload.process_undo = False
- yield message
+ return_list.append(message)
# the sender apparently does not have the lower dispersy-undo message, lets give it back
- self._dispersy._send_packets([message.candidate], [db_msg.packet], self, db_msg.name)
+ yield self._dispersy._send_packets([message.candidate], [db_msg.packet], self, db_msg.name)
- yield DispersyDuplicatedUndo(db_msg, message)
+ return_list.append(DispersyDuplicatedUndo(db_msg, message))
break
else:
# The new message is binary lower. As we cannot delete the old one, what we do
# instead, is we store both and mark the message we already have as undone by the new one.
# To accomplish this, we yield a DispersyDuplicatedUndo so on_undo() can mark the other
# message as undone by the newly reveived message.
- yield message
- yield DispersyDuplicatedUndo(message, db_msg)
+ return_list.append(message)
+ return_list.append(DispersyDuplicatedUndo(message, db_msg))
break
else:
# did not break, hence, the message hasn't been undone more than once.
- yield message
+ return_list.append(message)
# continue. either the message was malicious or it has already been yielded
continue
- yield message
+ return_list.append(message)
+ returnValue(iter(return_list))
+ @inlineCallbacks
def on_undo(self, messages):
"""
Undo a single message.
@@ -3472,34 +3601,37 @@ def on_undo(self, messages):
parameters.append((message.packet_id, self.database_id, message.payload.member.database_id, message.payload.global_time))
real_messages.append(message)
- self._dispersy._database.executemany(u"UPDATE sync SET undone = ? "
+ yield self._dispersy.database.stormdb.executemany(u"UPDATE sync SET undone = ? "
u"WHERE community = ? AND member = ? AND global_time = ?", parameters)
for meta, sub_messages in groupby(real_messages, key=lambda x: x.payload.packet.meta):
- meta.undo_callback([(message.payload.member, message.payload.global_time, message.payload.packet) for message in sub_messages])
+ yield meta.undo_callback([(message.payload.member, message.payload.global_time, message.payload.packet) for message in sub_messages])
+ @inlineCallbacks
def create_destroy_community(self, degree, sign_with_master=False, store=True, update=True, forward=True):
assert isinstance(degree, unicode)
assert degree in (u"soft-kill", u"hard-kill")
meta = self.get_meta_message(u"dispersy-destroy-community")
- message = meta.impl(authentication=((self.master_member if sign_with_master else self.my_member),),
- distribution=(self.claim_global_time(),),
+ claimed_global_time = yield self.claim_global_time()
+ message = yield meta.impl(authentication=((self.master_member if sign_with_master else self.my_member),),
+ distribution=(claimed_global_time,),
payload=(degree,))
# in this special case we need to forward the message before processing it locally.
# otherwise the candidate table will have been cleaned and we won't have any destination
# addresses.
- self._dispersy._forward([message])
+ yield self._dispersy._forward([message])
# now store and update without forwarding. forwarding now will result in new entries in our
# candidate table that we just clean.
- self._dispersy.store_update_forward([message], store, update, False)
- return message
+ yield self._dispersy.store_update_forward([message], store, update, False)
+ returnValue(message)
+ @inlineCallbacks
def on_destroy_community(self, messages):
# epidemic spread of the destroy message
- self._dispersy._forward(messages)
+ yield self._dispersy._forward(messages)
for message in messages:
assert message.name == u"dispersy-destroy-community"
@@ -3507,7 +3639,7 @@ def on_destroy_community(self, messages):
try:
# let the community code cleanup first.
- new_classification = self.dispersy_cleanup_community(message)
+ new_classification = yield self.dispersy_cleanup_community(message)
except Exception:
continue
assert issubclass(new_classification, Community)
@@ -3536,8 +3668,8 @@ def on_destroy_community(self, messages):
# we should not remove our own dispersy-identity message
try:
- packet_id, = self._dispersy._database.execute(u"SELECT id FROM sync WHERE meta_message = ? AND member = ?", (identity_message_id, self.my_member.database_id)).next()
- except StopIteration:
+ packet_id, = yield self._dispersy.database.stormdb.fetchone(u"SELECT id FROM sync WHERE meta_message = ? AND member = ?", (identity_message_id, self.my_member.database_id))
+ except TypeError:
pass
else:
identities.add(self.my_member.public_key)
@@ -3555,9 +3687,9 @@ def on_destroy_community(self, messages):
if not item.authentication.member.public_key in identities:
identities.add(item.authentication.member.public_key)
try:
- packet_id, = self._dispersy._database.execute(u"SELECT id FROM sync WHERE meta_message = ? AND member = ?",
- (identity_message_id, item.authentication.member.database_id)).next()
- except StopIteration:
+ packet_id, = yield self._dispersy.database.stormdb.fetchone(u"SELECT id FROM sync WHERE meta_message = ? AND member = ?",
+ (identity_message_id, item.authentication.member.database_id))
+ except TypeError:
pass
else:
packet_ids.add(packet_id)
@@ -3567,22 +3699,29 @@ def on_destroy_community(self, messages):
todo.extend(proofs)
# 1. cleanup the double_signed_sync table.
- self._dispersy._database.execute(u"DELETE FROM double_signed_sync WHERE sync IN (SELECT id FROM sync JOIN double_signed_sync ON sync.id = double_signed_sync.sync WHERE sync.community = ?)", (self.database_id,))
+ yield self._dispersy.database.stormdb.execute(u"DELETE FROM double_signed_sync WHERE sync IN (SELECT id FROM sync JOIN double_signed_sync ON sync.id = double_signed_sync.sync WHERE sync.community = ?)", (self.database_id,))
# 2. cleanup sync table. everything except what we need to tell others this
# community is no longer available
- self._dispersy._database.execute(u"DELETE FROM sync WHERE community = ? AND id NOT IN (" + u", ".join(u"?" for _ in packet_ids) + ")", [self.database_id] + list(packet_ids))
+ yield self._dispersy.database.stormdb.execute(u"DELETE FROM sync WHERE community = ? AND id NOT IN (" + u", ".join(u"?" for _ in packet_ids) + ")", [self.database_id] + list(packet_ids))
- self._dispersy.reclassify_community(self, new_classification)
+ yield self._dispersy.reclassify_community(self, new_classification)
+ @inlineCallbacks
def create_dynamic_settings(self, policies, sign_with_master=False, store=True, update=True, forward=True):
meta = self.get_meta_message(u"dispersy-dynamic-settings")
- message = meta.impl(authentication=((self.master_member if sign_with_master else self.my_member),),
- distribution=(self.claim_global_time(), self._claim_master_member_sequence_number(meta) if sign_with_master else meta.distribution.claim_sequence_number()),
+ claimed_global_time = yield self.claim_global_time()
+ if sign_with_master:
+ sequence_number = yield self._claim_master_member_sequence_number(meta)
+ else:
+ sequence_number = meta.distribution.claim_sequence_number()
+ message = yield meta.impl(authentication=((self.master_member if sign_with_master else self.my_member),),
+ distribution=(claimed_global_time, sequence_number),
payload=(policies,))
- self._dispersy.store_update_forward([message], store, update, forward)
- return message
+ yield self._dispersy.store_update_forward([message], store, update, forward)
+ returnValue(message)
+ @inlineCallbacks
def on_dynamic_settings(self, messages, initializing=False):
assert isinstance(initializing, bool)
@@ -3597,19 +3736,21 @@ def on_dynamic_settings(self, messages, initializing=False):
if not initializing:
for meta, globaltime_range in changes.iteritems():
- self._update_timerange(meta, globaltime_range[0], globaltime_range[1])
+ yield self._update_timerange(meta, globaltime_range[0], globaltime_range[1])
+ @inlineCallbacks
def _update_timerange(self, meta, time_low, time_high):
- execute = self._dispersy._database.execute
- executemany = self._dispersy._database.executemany
+ executemany = self._dispersy.database.stormdb.executemany
+ fetchall = self._dispersy.database.stormdb.fetchall
self._logger.debug("updating %s [%d:%d]", meta.name, time_low, time_high)
undo = []
redo = []
- for packet_id, packet, undone in list(execute(u"SELECT id, packet, undone FROM sync WHERE meta_message = ? AND global_time BETWEEN ? AND ?",
- (meta.database_id, time_low, time_high))):
- message = self._dispersy.convert_packet_to_message(str(packet), self)
+ sync_packets = yield fetchall(u"SELECT id, packet, undone FROM sync WHERE meta_message = ? AND global_time BETWEEN ? AND ?",
+ (meta.database_id, time_low, time_high))
+ for packet_id, packet, undone in sync_packets:
+ message = yield self._dispersy.convert_packet_to_message(str(packet), self)
if message:
message.packet_id = packet_id
allowed, _ = self.timeline.check(message)
@@ -3628,16 +3769,17 @@ def _update_timerange(self, meta, time_low, time_high):
message.name, message.distribution.global_time)
if undo:
- executemany(u"UPDATE sync SET undone = 1 WHERE id = ?", ((message.packet_id,) for message in undo))
- meta.undo_callback([(message.authentication.member, message.distribution.global_time, message) for message in undo])
+ yield executemany(u"UPDATE sync SET undone = 1 WHERE id = ?", ((message.packet_id,) for message in undo))
+ yield meta.undo_callback([(message.authentication.member, message.distribution.global_time, message) for message in undo])
# notify that global times have changed
# meta.self.update_sync_range(meta, [message.distribution.global_time for message in undo])
if redo:
- executemany(u"UPDATE sync SET undone = 0 WHERE id = ?", ((message.packet_id,) for message in redo))
- meta.handle_callback(redo)
+ yield executemany(u"UPDATE sync SET undone = 0 WHERE id = ?", ((message.packet_id,) for message in redo))
+ yield meta.handle_callback(redo)
+ @inlineCallbacks
def _claim_master_member_sequence_number(self, meta):
"""
Tries to guess the most recent sequence number used by the master member for META in
@@ -3652,19 +3794,20 @@ def _claim_master_member_sequence_number(self, meta):
numbers are used.
"""
assert isinstance(meta.distribution, FullSyncDistribution), "currently only FullSyncDistribution allows sequence numbers"
- sequence_number, = self._dispersy._database.execute(u"SELECT COUNT(*) FROM sync WHERE member = ? AND sync.meta_message = ?",
- (self.master_member.database_id, meta.database_id)).next()
- return sequence_number + 1
+ sequence_number, = yield self._dispersy.database.stormdb.fetchone(u"SELECT COUNT(*) FROM sync WHERE member = ? AND sync.meta_message = ?",
+ (self.master_member.database_id, meta.database_id))
+ returnValue(sequence_number + 1)
class HardKilledCommunity(Community):
+ @inlineCallbacks
def initialize(self, *args, **kargs):
- super(HardKilledCommunity, self).initialize(*args, **kargs)
+ yield super(HardKilledCommunity, self).initialize(*args, **kargs)
destroy_message_id = self._meta_messages[u"dispersy-destroy-community"].database_id
try:
- packet, = self._dispersy.database.execute(u"SELECT packet FROM sync WHERE meta_message = ? LIMIT 1", (destroy_message_id,)).next()
- except StopIteration:
+ packet, = yield self._dispersy.database.stormdb.fetchone(u"SELECT packet FROM sync WHERE meta_message = ? LIMIT 1", (destroy_message_id,))
+ except TypeError:
self._logger.error("unable to locate the dispersy-destroy-community message")
self._destroy_community_packet = ""
else:
@@ -3697,7 +3840,8 @@ def get_conversion_for_packet(self, packet):
# try again
return super(HardKilledCommunity, self).get_conversion_for_packet(packet)
+ @inlineCallbacks
def on_introduction_request(self, messages):
if self._destroy_community_packet:
- self._dispersy._send_packets([message.candidate for message in messages], [self._destroy_community_packet],
+ yield self._dispersy._send_packets([message.candidate for message in messages], [self._destroy_community_packet],
self, "-caused by destroy-community-")
diff --git a/conversion.py b/conversion.py
index 09d19062..c5a33f3e 100644
--- a/conversion.py
+++ b/conversion.py
@@ -3,6 +3,7 @@
from socket import inet_ntoa, inet_aton
from struct import pack, unpack_from, Struct
import logging
+from twisted.internet.defer import inlineCallbacks, returnValue
from .authentication import Authentication, NoAuthentication, MemberAuthentication, DoubleMemberAuthentication
from .bloomfilter import BloomFilter
@@ -275,13 +276,14 @@ def _encode_missing_sequence(self, message):
message_id = self._encode_message_map[payload.message.name].byte
return (payload.member.mid, message_id, self._struct_LL.pack(payload.missing_low, payload.missing_high))
+ @inlineCallbacks
def _decode_missing_sequence(self, placeholder, offset, data):
if len(data) < offset + 29:
raise DropPacket("Insufficient packet size")
member_id = data[offset:offset + 20]
offset += 20
- member = self._community.get_member(mid=member_id)
+ member = yield self._community.get_member(mid=member_id)
if member is None:
raise DropPacket("Unknown member")
@@ -295,7 +297,7 @@ def _decode_missing_sequence(self, placeholder, offset, data):
raise DropPacket("Invalid missing_low and missing_high combination")
offset += 8
- return offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, member, decode_functions.meta, missing_low, missing_high)
+ returnValue((offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, member, decode_functions.meta, missing_low, missing_high)))
def _encode_missing_message(self, message):
"""
@@ -315,6 +317,7 @@ def _encode_missing_message(self, message):
payload = message.payload
return (self._struct_H.pack(len(payload.member.public_key)), payload.member.public_key, pack("!%dQ" % len(payload.global_times), *payload.global_times))
+ @inlineCallbacks
def _decode_missing_message(self, placeholder, offset, data):
if len(data) < offset + 2:
raise DropPacket("Insufficient packet size (_decode_missing_message.1)")
@@ -327,8 +330,8 @@ def _decode_missing_message(self, placeholder, offset, data):
key = data[offset:offset + key_length]
try:
- member = self._community.dispersy.get_member(public_key=key)
- except:
+ member = yield self._community.dispersy.get_member(public_key=key)
+ except Exception:
raise DropPacket("Invalid cryptographic key (_decode_missing_message)")
offset += key_length
@@ -342,11 +345,12 @@ def _decode_missing_message(self, placeholder, offset, data):
global_times = unpack_from("!%dQ" % global_time_length, data, offset)
offset += 8 * len(global_times)
- return offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, member, global_times)
+ returnValue((offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, member, global_times)))
def _encode_signature_request(self, message):
return (self._struct_H.pack(message.payload.identifier), message.payload.message.packet)
+ @inlineCallbacks
def _decode_signature_request(self, placeholder, offset, data):
if len(data) < offset + 2:
raise DropPacket("Insufficient packet size (_decode_signature_request)")
@@ -354,15 +358,18 @@ def _decode_signature_request(self, placeholder, offset, data):
identifier, = self._struct_H.unpack_from(data, offset)
offset += 2
- message = self.decode_message(placeholder.candidate, data[offset:], True, True)
+ message = yield self.decode_message(placeholder.candidate, data[offset:], True, True)
offset = len(data)
- return offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, identifier, message)
+ returnValue((offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, identifier, message)))
+ @inlineCallbacks
def _encode_signature_response(self, message):
- return (self._struct_H.pack(message.payload.identifier), self.encode_message(message.payload.message))
+ encoded_message = yield self.encode_message(message.payload.message)
+ returnValue((self._struct_H.pack(message.payload.identifier), encoded_message))
# return message.payload.identifier, message.payload.signature
+ @inlineCallbacks
def _decode_signature_response(self, placeholder, offset, data):
if len(data) < offset + 2:
raise DropPacket("Insufficient packet size (_decode_signature_request)")
@@ -370,10 +377,10 @@ def _decode_signature_response(self, placeholder, offset, data):
identifier, = self._struct_H.unpack_from(data, offset)
offset += 2
- message = self.decode_message(placeholder.candidate, data[offset:], True, True)
+ message = yield self.decode_message(placeholder.candidate, data[offset:], True, True)
offset = len(data)
- return offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, identifier, message)
+ returnValue((offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, identifier, message)))
def _encode_identity(self, message):
return ()
@@ -451,6 +458,7 @@ def _encode_authorize(self, message):
return tuple(data)
+ @inlineCallbacks
def _decode_authorize(self, placeholder, offset, data):
permission_map = {u"permit": int("0001", 2), u"authorize": int("0010", 2), u"revoke": int("0100", 2), u"undo": int("1000", 2)}
permission_triplets = []
@@ -467,8 +475,8 @@ def _decode_authorize(self, placeholder, offset, data):
key = data[offset:offset + key_length]
try:
- member = self._community.dispersy.get_member(public_key=key)
- except:
+ member = yield self._community.dispersy.get_member(public_key=key)
+ except Exception:
raise DropPacket("Invalid cryptographic key (_decode_authorize)")
offset += key_length
@@ -502,7 +510,7 @@ def _decode_authorize(self, placeholder, offset, data):
permission_triplets.append((member, message, permission))
- return offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, permission_triplets)
+ returnValue((offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, permission_triplets)))
def _encode_revoke(self, message):
"""
@@ -547,6 +555,7 @@ def _encode_revoke(self, message):
return tuple(data)
+ @inlineCallbacks
def _decode_revoke(self, placeholder, offset, data):
permission_map = {u"permit": int("0001", 2), u"authorize": int("0010", 2), u"revoke": int("0100", 2), u"undo": int("1000", 2)}
permission_triplets = []
@@ -563,8 +572,8 @@ def _decode_revoke(self, placeholder, offset, data):
key = data[offset:offset + key_length]
try:
- member = self._community.dispersy.get_member(public_key=key)
- except:
+ member = yield self._community.dispersy.get_member(public_key=key)
+ except Exception:
raise DropPacket("Invalid cryptographic key (_decode_revoke)")
offset += key_length
@@ -595,7 +604,7 @@ def _decode_revoke(self, placeholder, offset, data):
if permission_bit & permission_bits:
permission_triplets.append((member, message, permission))
- return offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, permission_triplets)
+ returnValue((offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, permission_triplets)))
def _encode_undo_own(self, message):
return (self._struct_Q.pack(message.payload.global_time),)
@@ -620,6 +629,7 @@ def _encode_undo_other(self, message):
assert message.payload.member.public_key
return (self._struct_H.pack(len(public_key)), public_key, self._struct_Q.pack(message.payload.global_time))
+ @inlineCallbacks
def _decode_undo_other(self, placeholder, offset, data):
if len(data) < offset + 2:
raise DropPacket("Insufficient packet size")
@@ -632,8 +642,8 @@ def _decode_undo_other(self, placeholder, offset, data):
public_key = data[offset:offset + key_length]
try:
- member = self._community.dispersy.get_member(public_key=public_key)
- except:
+ member = yield self._community.dispersy.get_member(public_key=public_key)
+ except Exception:
raise DropPacket("Invalid cryptographic key (_decode_revoke)")
offset += key_length
@@ -646,12 +656,13 @@ def _decode_undo_other(self, placeholder, offset, data):
if not global_time < placeholder.distribution.global_time:
raise DropPacket("Invalid global time (trying to apply undo to the future)")
- return offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, member, global_time)
+ returnValue((offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, member, global_time)))
def _encode_missing_proof(self, message):
payload = message.payload
return (self._struct_QH.pack(payload.global_time, len(payload.member.public_key)), payload.member.public_key)
+ @inlineCallbacks
def _decode_missing_proof(self, placeholder, offset, data):
if len(data) < offset + 10:
raise DropPacket("Insufficient packet size (_decode_missing_proof)")
@@ -661,12 +672,12 @@ def _decode_missing_proof(self, placeholder, offset, data):
key = data[offset:offset + key_length]
try:
- member = self._community.dispersy.get_member(public_key=key)
- except:
+ member = yield self._community.dispersy.get_member(public_key=key)
+ except Exception:
raise DropPacket("Invalid cryptographic key (_decode_missing_proof)")
offset += key_length
- return offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, member, global_time)
+ returnValue((offset, placeholder.meta.payload.Implementation(placeholder.meta.payload, member, global_time)))
def _encode_dynamic_settings(self, message):
data = []
@@ -972,7 +983,8 @@ def can_encode_message(self, message):
assert isinstance(message, (Message, Message.Implementation)), type(message)
return message.name in self._encode_message_map
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} {1.name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} {1.name}")
+ @inlineCallbacks
def encode_message(self, message, sign=True):
assert isinstance(message, Message.Implementation), message
assert message.name in self._encode_message_map, message.name
@@ -982,23 +994,24 @@ def encode_message(self, message, sign=True):
container = [self._prefix, encode_functions.byte]
# authentication
- encode_functions.authentication(container, message)
+ yield encode_functions.authentication(container, message)
# resolution
- encode_functions.resolution(container, message)
+ yield encode_functions.resolution(container, message)
# distribution
- encode_functions.distribution(container, message)
+ yield encode_functions.distribution(container, message)
# payload
- payload = encode_functions.payload(message)
+ payload = yield encode_functions.payload(message)
assert isinstance(payload, (tuple, list)), (type(payload), encode_functions.payload)
assert all(isinstance(x, str) for x in payload)
container.extend(payload)
# sign
packet = "".join(container)
- return packet + message.authentication.sign(packet)
+ res = packet + message.authentication.sign(packet)
+ returnValue(res)
#
# Decoding
@@ -1061,6 +1074,7 @@ def _decode_no_authentication(self, placeholder):
placeholder.first_signature_offset = len(placeholder.data)
placeholder.authentication = NoAuthentication.Implementation(placeholder.meta.authentication)
+ @inlineCallbacks
def _decode_member_authentication(self, placeholder):
authentication = placeholder.meta.authentication
offset = placeholder.offset
@@ -1073,7 +1087,7 @@ def _decode_member_authentication(self, placeholder):
member_id = data[offset:offset + 20]
offset += 20
- member = self._community.get_member(mid=member_id)
+ member = yield self._community.get_member(mid=member_id)
# If signatures and verification are enabled, verify that the signature matches the member sha1 identifier
if member:
placeholder.offset = offset
@@ -1093,7 +1107,7 @@ def _decode_member_authentication(self, placeholder):
offset += key_length
try:
- member = self._community.get_member(public_key=key)
+ member = yield self._community.get_member(public_key=key)
except:
raise DropPacket("Invalid cryptographic key (_decode_member_authentication)")
@@ -1106,6 +1120,7 @@ def _decode_member_authentication(self, placeholder):
else:
raise NotImplementedError(encoding)
+ @inlineCallbacks
def _decode_double_member_authentication(self, placeholder):
authentication = placeholder.meta.authentication
offset = placeholder.offset
@@ -1116,7 +1131,7 @@ def _decode_double_member_authentication(self, placeholder):
if encoding == "sha1":
for _ in range(2):
member_id = data[offset:offset + 20]
- member = self._community.get_member(mid=member_id)
+ member = yield self._community.get_member(mid=member_id)
if not member:
raise DelayPacketByMissingMember(self._community, member_id)
offset += 20
@@ -1132,9 +1147,9 @@ def _decode_double_member_authentication(self, placeholder):
key = data[offset:offset + key_length]
offset += key_length
try:
- member = self._community.dispersy.get_member(public_key=key)
+ member = yield self._community.dispersy.get_member(public_key=key)
members.append(member)
- except:
+ except Exception:
raise DropPacket("Invalid cryptographic key1 (_decode_double_member_authentication)")
else:
@@ -1173,7 +1188,8 @@ def decode_meta_message(self, data):
return self._decode_message_map[data[22]].meta
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} {return_value}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} {return_value}")
+ @inlineCallbacks
def decode_message(self, candidate, data, verify=True, allow_empty_signature=False, source="unknown"):
"""
Decode a binary string into a Message structure, with some
@@ -1193,30 +1209,32 @@ def decode_message(self, candidate, data, verify=True, allow_empty_signature=Fal
if not self.can_decode_message(data):
raise DropPacket("Cannot decode message")
+ # The function pointer obtained here may point to a function that returns a deferred.
decode_functions = self._decode_message_map[data[22]]
# placeholder
placeholder = self.Placeholder(candidate, decode_functions.meta, 23, data, verify, allow_empty_signature)
# authentication
- decode_functions.authentication(placeholder)
+ yield decode_functions.authentication(placeholder)
+
assert isinstance(placeholder.authentication, Authentication.Implementation), placeholder.authentication
# resolution
- decode_functions.resolution(placeholder)
+ yield decode_functions.resolution(placeholder)
assert isinstance(placeholder.resolution, Resolution.Implementation)
# destination
- decode_functions.destination(placeholder)
+ yield decode_functions.destination(placeholder)
assert isinstance(placeholder.destination, Destination.Implementation)
# distribution
- decode_functions.distribution(placeholder)
+ yield decode_functions.distribution(placeholder)
assert isinstance(placeholder.distribution, Distribution.Implementation)
# payload
payload = placeholder.data[:placeholder.first_signature_offset]
- placeholder.offset, placeholder.payload = decode_functions.payload(placeholder, placeholder.offset, payload)
+ placeholder.offset, placeholder.payload = yield decode_functions.payload(placeholder, placeholder.offset, payload)
if placeholder.offset != placeholder.first_signature_offset:
self._logger.warning("invalid packet size for %s data:%d; offset:%d",
placeholder.meta.name, placeholder.first_signature_offset, placeholder.offset)
@@ -1226,10 +1244,12 @@ def decode_message(self, candidate, data, verify=True, allow_empty_signature=Fal
assert isinstance(placeholder.offset, (int, long))
# verify payload
- if placeholder.verify and not placeholder.authentication.has_valid_signature_for(placeholder, payload):
+ has_valid_signature = yield placeholder.authentication.has_valid_signature_for(placeholder, payload)
+ if placeholder.verify and not has_valid_signature:
raise DropPacket("Invalid signature")
- return placeholder.meta.Implementation(placeholder.meta, placeholder.authentication, placeholder.resolution, placeholder.distribution, placeholder.destination, placeholder.payload, conversion=self, candidate=candidate, source=source, packet=placeholder.data)
+ placeholder_impl = placeholder.meta.Implementation(placeholder.meta, placeholder.authentication, placeholder.resolution, placeholder.distribution, placeholder.destination, placeholder.payload, conversion=self, candidate=candidate, source=source, packet=placeholder.data)
+ returnValue(placeholder_impl)
def __str__(self):
return "<%s %s%s [%s]>" % (self.__class__.__name__, self.dispersy_version.encode("HEX"), self.community_version.encode("HEX"), ", ".join(self._encode_message_map.iterkeys()))
@@ -1258,7 +1278,7 @@ def define(value, name, encode, decode):
# 255 is reserved
define(254, u"dispersy-missing-sequence", self._encode_missing_sequence, self._decode_missing_sequence)
- define(253, u"dispersy-missing-proof", self._encode_missing_proof, self._decode_missing_proof)
+ define(253, u"dispersy-missing-proof", self._encode_missing_proof, self._decode_missing_proof) # self_decode_missing_proof returns a deferred.
define(252, u"dispersy-signature-request", self._encode_signature_request, self._decode_signature_request)
define(251, u"dispersy-signature-response", self._encode_signature_response, self._decode_signature_response)
define(250, u"dispersy-puncture-request", self._encode_puncture_request, self._decode_puncture_request)
diff --git a/crypto.py b/crypto.py
index 00d0b8ce..d82b9e64 100644
--- a/crypto.py
+++ b/crypto.py
@@ -115,7 +115,7 @@ def security_levels(self):
"""
return _CURVES.keys()
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def generate_key(self, security_level):
"""
Generate a new Elliptic Curve object with a new public / private key pair.
@@ -153,7 +153,7 @@ def key_to_hash(self, ec):
assert isinstance(ec, DispersyKey), ec
return ec.key_to_hash()
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def is_valid_private_bin(self, string):
"Returns True if the input is a valid public/private keypair stored in a binary format"
try:
@@ -162,7 +162,7 @@ def is_valid_private_bin(self, string):
return False
return True
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def is_valid_public_bin(self, string):
"Returns True if the input is a valid public key"
try:
@@ -262,7 +262,7 @@ def pub(self):
def has_secret_key(self):
return False
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def pem_to_bin(self, pem):
"""
Convert a key in the PEM format into a key in the binary format.
@@ -270,26 +270,26 @@ def pem_to_bin(self, pem):
"""
return "".join(pem.split("\n")[1:-2]).decode("BASE64")
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def key_to_pem(self):
"Convert a key to the PEM format."
bio = BIO.MemoryBuffer()
self.ec.save_pub_key_bio(bio)
return bio.read_all()
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def key_from_pem(self, pem):
"Get the EC from a public PEM."
return EC.load_pub_key_bio(BIO.MemoryBuffer(pem))
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def key_to_bin(self):
return self.pem_to_bin(self.key_to_pem())
def get_signature_length(self):
return int(ceil(len(self.ec) / 8.0)) * 2
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def verify(self, signature, data):
length = len(signature) / 2
r = signature[:length]
@@ -345,21 +345,21 @@ def pub(self):
def has_secret_key(self):
return True
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def key_to_pem(self):
"Convert a key to the PEM format."
bio = BIO.MemoryBuffer()
self.ec.save_key_bio(bio, None, lambda *args: "")
return bio.read_all()
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def key_from_pem(self, pem):
"Get the EC from a public/private keypair stored in the PEM."
def get_password(*args):
return ""
return EC.load_key_bio(BIO.MemoryBuffer(pem), get_password)
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def signature(self, msg):
length = int(ceil(len(self.ec) / 8.0))
digest = sha1(msg).digest()
@@ -389,7 +389,7 @@ def pub(self):
def has_secret_key(self):
return False
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def verify(self, signature, msg):
return self.veri.verify(signature + msg)
@@ -416,7 +416,7 @@ def pub(self):
def has_secret_key(self):
return True
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name}")
def signature(self, msg):
return self.key.signature(msg)
diff --git a/database.py b/database.py
index 37779910..b90fccba 100644
--- a/database.py
+++ b/database.py
@@ -6,11 +6,15 @@
@contact: dispersy@frayja.com
"""
import logging
+import os
import sys
+import tempfile
import thread
from abc import ABCMeta, abstractmethod
-from sqlite3 import Connection
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+from StormDBManager import StormDBManager
from .util import attach_runtime_statistics
@@ -18,17 +22,21 @@
_explain_query_plan_logger = logging.getLogger("explain-query-plan")
_explain_query_plan = set()
+
def attach_explain_query_plan(func):
+ @inlineCallbacks
def attach_explain_query_plan_helper(self, statements, bindings=()):
if not statements in _explain_query_plan:
_explain_query_plan.add(statements)
_explain_query_plan_logger.info("Explain query plan for <<<%s>>>", statements)
- for line in self._cursor.execute(u"EXPLAIN QUERY PLAN %s" % statements, bindings):
+ query_plan = yield self.stormdb.fetchall(u"EXPLAIN QUERY PLAN %s" % statements, bindings)
+ for line in query_plan:
_explain_query_plan_logger.info(line)
_explain_query_plan_logger.info("--")
- return func(self, statements, bindings)
+ return_value = yield func(self, statements, bindings)
+ returnValue(return_value)
attach_explain_query_plan_helper.__name__ = func.__name__
return attach_explain_query_plan_helper
@@ -76,6 +84,15 @@ def __init__(self, file_path):
self._connection = None
self._cursor = None
self._database_version = 0
+ self.temp_db_path = None
+ # Storm does not know :memory: and it doesn't work when doing things multi-threaded. So generate a tmp database
+ # Note that this database is a file database.
+ if self._file_path == ":memory:":
+ temp_dir = tempfile.mkdtemp(prefix="dispersy_tmp_db_")
+ self.temp_db_path = os.path.join(temp_dir, u"test.db")
+ self.stormdb = StormDBManager("sqlite:%s" % self.temp_db_path)
+ else:
+ self.stormdb = StormDBManager("sqlite:%s" % self._file_path)
# _commit_callbacks contains a list with functions that are called on each database commit
self._commit_callbacks = []
@@ -87,18 +104,20 @@ def __init__(self, file_path):
if __debug__:
self._debug_thread_ident = 0
+ @inlineCallbacks
def open(self, initial_statements=True, prepare_visioning=True):
assert self._cursor is None, "Database.open() has already been called"
assert self._connection is None, "Database.open() has already been called"
if __debug__:
self._debug_thread_ident = thread.get_ident()
self._logger.debug("open database [%s]", self._file_path)
+ yield self.stormdb.initialize()
self._connect()
if initial_statements:
- self._initial_statements()
+ yield self._initial_statements()
if prepare_visioning:
- self._prepare_version()
- return True
+ yield self._prepare_version()
+ returnValue(True)
def close(self, commit=True):
assert self._cursor is not None, "Database.close() has been called or Database.open() has not been called"
@@ -110,20 +129,27 @@ def close(self, commit=True):
self._cursor = None
self._connection.close()
self._connection = None
+ # Clean up the temp database.
+ if self.temp_db_path and os.path.exists(self.temp_db_path):
+ os.remove(self.temp_db_path)
return True
def _connect(self):
- self._connection = Connection(self._file_path)
- self._cursor = self._connection.cursor()
+ self._connection = self.stormdb.connection
+ self._cursor = self.stormdb._cursor
+ @inlineCallbacks
def _initial_statements(self):
assert self._cursor is not None, "Database.close() has been called or Database.open() has not been called"
assert self._connection is not None, "Database.close() has been called or Database.open() has not been called"
# collect current database configuration
- page_size = int(next(self._cursor.execute(u"PRAGMA page_size"))[0])
- journal_mode = unicode(next(self._cursor.execute(u"PRAGMA journal_mode"))[0]).upper()
- synchronous = unicode(next(self._cursor.execute(u"PRAGMA synchronous"))[0]).upper()
+ db_page_size = yield self.stormdb.fetchone(u"PRAGMA page_size")
+ page_size = int(db_page_size[0])
+ db_journal_mode = yield self.stormdb.fetchone(u"PRAGMA journal_mode")
+ journal_mode = unicode(db_journal_mode[0]).upper()
+ db_synchronous = yield self.stormdb.fetchone(u"PRAGMA synchronous")
+ synchronous = unicode(db_synchronous[0]).upper()
#
# PRAGMA page_size = bytes;
@@ -137,11 +163,10 @@ def _initial_statements(self):
# it is not possible to change page_size when WAL is enabled
if journal_mode == u"WAL":
- self._cursor.executescript(u"PRAGMA journal_mode = DELETE")
+ yield self.stormdb.execute(u"PRAGMA journal_mode = DELETE")
journal_mode = u"DELETE"
- self._cursor.execute(u"PRAGMA page_size = 8192")
- self._cursor.execute(u"VACUUM")
- page_size = 8192
+ yield self.stormdb.execute(u"PRAGMA page_size = 8192")
+ yield self.stormdb.execute(u"VACUUM")
else:
self._logger.debug("PRAGMA page_size = %s (no change) [%s]", page_size, self._file_path)
@@ -152,8 +177,8 @@ def _initial_statements(self):
#
if not (journal_mode == u"WAL" or self._file_path == u":memory:"):
self._logger.debug("PRAGMA journal_mode = WAL (previously: %s) [%s]", journal_mode, self._file_path)
- self._cursor.execute(u"PRAGMA locking_mode = EXCLUSIVE")
- self._cursor.execute(u"PRAGMA journal_mode = WAL")
+ yield self.stormdb.execute(u"PRAGMA locking_mode = EXCLUSIVE")
+ yield self.stormdb.execute(u"PRAGMA journal_mode = WAL")
else:
self._logger.debug("PRAGMA journal_mode = %s (no change) [%s]", journal_mode, self._file_path)
@@ -164,33 +189,34 @@ def _initial_statements(self):
#
if not synchronous in (u"NORMAL", u"1"):
self._logger.debug("PRAGMA synchronous = NORMAL (previously: %s) [%s]", synchronous, self._file_path)
- self._cursor.execute(u"PRAGMA synchronous = NORMAL")
+ yield self.stormdb.execute(u"PRAGMA synchronous = NORMAL")
else:
self._logger.debug("PRAGMA synchronous = %s (no change) [%s]", synchronous, self._file_path)
+ @inlineCallbacks
def _prepare_version(self):
assert self._cursor is not None, "Database.close() has been called or Database.open() has not been called"
assert self._connection is not None, "Database.close() has been called or Database.open() has not been called"
# check is the database contains an 'option' table
try:
- count, = next(self.execute(u"SELECT COUNT(*) FROM sqlite_master WHERE type = 'table' AND name = 'option'"))
- except StopIteration:
+ count, = yield self.stormdb.fetchone(u"SELECT COUNT(*) FROM sqlite_master WHERE type = 'table' AND name = 'option'")
+ except TypeError:
raise RuntimeError()
if count:
# get version from required 'option' table
try:
- version, = next(self.execute(u"SELECT value FROM option WHERE key == 'database_version' LIMIT 1"))
- except StopIteration:
+ version, = yield self.stormdb.fetchone(u"SELECT value FROM option WHERE key == 'database_version' LIMIT 1")
+ except TypeError:
# the 'database_version' key was not found
version = u"0"
else:
# the 'option' table probably hasn't been created yet
version = u"0"
- self._database_version = self.check_database(version)
+ self._database_version = yield self.check_database(version)
assert isinstance(self._database_version, (int, long)), type(self._database_version)
@property
@@ -247,8 +273,8 @@ def __exit__(self, exc_type, exc_value, traceback):
# returning False to let Python reraise the exception.
return False
- @attach_explain_query_plan
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} {1} [{0.file_path}]")
+ #@attach_explain_query_plan
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} {1} [{0.file_path}]")
def execute(self, statement, bindings=(), get_lastrowid=False):
"""
Execute one SQL statement.
@@ -295,7 +321,7 @@ def execute(self, statement, bindings=(), get_lastrowid=False):
result = self._cursor.lastrowid
return result
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} {1} [{0.file_path}]")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} {1} [{0.file_path}]")
def executescript(self, statements):
assert self._cursor is not None, "Database.close() has been called or Database.open() has not been called"
assert self._connection is not None, "Database.close() has been called or Database.open() has not been called"
@@ -306,8 +332,8 @@ def executescript(self, statements):
self._logger.log(logging.NOTSET, "%s [%s]", statements, self._file_path)
return self._cursor.executescript(statements)
- @attach_explain_query_plan
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} {1} [{0.file_path}]")
+ #@attach_explain_query_plan
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} {1} [{0.file_path}]")
def executemany(self, statement, sequenceofbindings):
"""
Execute one SQL statement several times.
@@ -366,7 +392,7 @@ def executemany(self, statement, sequenceofbindings):
self._logger.log(logging.NOTSET, "%s [%s]", statement, self._file_path)
return self._cursor.executemany(statement, sequenceofbindings)
- @attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} [{0.file_path}]")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}.{function_name} [{0.file_path}]")
def commit(self, exiting=False):
assert self._cursor is not None, "Database.close() has been called or Database.open() has not been called"
assert self._connection is not None, "Database.close() has been called or Database.open() has not been called"
diff --git a/discovery/community.py b/discovery/community.py
index fad09613..462791c4 100644
--- a/discovery/community.py
+++ b/discovery/community.py
@@ -6,6 +6,7 @@
from time import time
from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.task import LoopingCall
from ..authentication import MemberAuthentication, NoAuthentication
@@ -174,6 +175,7 @@ def __hash__(self):
class DiscoveryCommunity(Community):
+ @inlineCallbacks
def initialize(self, max_prefs=25, max_tbs=25):
self._logger.debug('initializing DiscoveryComunity, max_prefs = %d, max_tbs = %d', max_prefs, max_tbs)
@@ -214,7 +216,7 @@ def on_bootstrap_started(_):
self.register_task('create_ping_requests',
LoopingCall(self.create_ping_requests)).start(PING_INTERVAL)
- super(DiscoveryCommunity, self).initialize()
+ yield super(DiscoveryCommunity, self).initialize()
def unload_community(self):
super(DiscoveryCommunity, self).unload_community()
@@ -245,6 +247,7 @@ def dispersy_get_walk_candidate(self):
return candidate
@classmethod
+ @inlineCallbacks
def get_master_members(cls, dispersy):
# generated: Fri Apr 25 13:37:28 2014
# curve: NID_sect571r1
@@ -259,8 +262,8 @@ def get_master_members(cls, dispersy):
#-----END PUBLIC KEY-----
master_key = "3081a7301006072a8648ce3d020106052b81040027038192000403b3ab059ced9b20646ab5e01762b3595c5e8855227ae1e424cff38a1e4edee73734ff2e2e829eb4f39bab20d7578284fcba7251acd74e7daf96f21d01ea17077faf4d27a655837d072baeb671287a88554e1191d8904b0dc572d09ff95f10ff092c8a5e2a01cd500624376aec875a6e3028aab784cfaf0bac6527245db8d93900d904ac2a922a02716ccef5a22f7968".decode(
"HEX")
- master = dispersy.get_member(public_key=master_key)
- return [master]
+ master = yield dispersy.get_member(public_key=master_key)
+ returnValue([master])
def initiate_meta_messages(self):
meta_messages = super(DiscoveryCommunity, self).initiate_meta_messages()
@@ -480,10 +483,12 @@ def __init__(self, community, requested_candidate, preference_list, allow_sync):
self.preference_list = preference_list
self.allow_sync = allow_sync
+ @inlineCallbacks
def on_timeout(self):
- self.community.send_introduction_request(self.requested_candidate, allow_sync=self.allow_sync)
+ yield self.community.send_introduction_request(self.requested_candidate, allow_sync=self.allow_sync)
self.community.peer_cache.inc_num_fails(self.requested_candidate)
+ @inlineCallbacks
def create_introduction_request(self, destination, allow_sync, forward=True, is_fast_walker=False):
assert isinstance(destination, WalkCandidate), [type(destination), destination]
@@ -492,11 +497,12 @@ def create_introduction_request(self, destination, allow_sync, forward=True, is_
send = False
if not self.is_recent_taste_buddy(destination):
- send = self.create_similarity_request(destination, allow_sync=allow_sync)
+ send = yield self.create_similarity_request(destination, allow_sync=allow_sync)
if not send:
- self.send_introduction_request(destination, allow_sync=allow_sync)
+ yield self.send_introduction_request(destination, allow_sync=allow_sync)
+ @inlineCallbacks
def create_similarity_request(self, destination, allow_sync=True):
payload = self.my_preferences()[:self.max_prefs]
if payload:
@@ -507,17 +513,18 @@ def create_similarity_request(self, destination, allow_sync=True):
destination, cache.number, len(payload))
meta_request = self.get_meta_message(u"similarity-request")
- request = meta_request.impl(authentication=(self.my_member,), distribution=(self.global_time,), destination=(destination,), payload=(
+ request = yield meta_request.impl(authentication=(self.my_member,), distribution=(self.global_time,), destination=(destination,), payload=(
cache.number, self._dispersy.lan_address, self._dispersy.wan_address, self._dispersy.connection_type, payload))
- if self._dispersy._forward([request]):
+ forward_result = yield self._dispersy._forward([request])
+ if forward_result:
self.send_packet_size += len(request.packet)
self._logger.debug("DiscoveryCommunity: sending similarity request to %s containing %d preferences: %s",
destination, len(payload), [preference.encode('HEX') for preference in payload])
- return True
+ returnValue(True)
- return False
+ returnValue(False)
def check_similarity_request(self, messages):
for message in messages:
@@ -532,6 +539,7 @@ def check_similarity_request(self, messages):
yield message
+ @inlineCallbacks
def on_similarity_request(self, messages):
meta = self.get_meta_message(u"similarity-response")
@@ -572,13 +580,13 @@ def on_similarity_request(self, messages):
bitfields.append((tb.candidate_mid, bitfield))
payload = (message.payload.identifier, self.my_preferences()[:self.max_prefs], bitfields)
- response_message = meta.impl(
+ response_message = yield meta.impl(
authentication=(self.my_member,), distribution=(self.global_time,), payload=payload)
self._logger.debug("DiscoveryCommunity: sending similarity response to %s containing %s",
message.candidate, [preference.encode('HEX') for preference in payload[1]])
- self._dispersy._send([message.candidate], [response_message])
+ yield self._dispersy._send([message.candidate], [response_message])
def compute_overlap(self, his_prefs, my_prefs=None):
return len(set(his_prefs) & set(my_prefs or self.my_preferences()))
@@ -597,6 +605,7 @@ def check_similarity_response(self, messages):
yield message
+ @inlineCallbacks
def on_similarity_response(self, messages):
for message in messages:
# Update possible taste buddies.
@@ -634,20 +643,22 @@ def on_similarity_response(self, messages):
self.add_possible_taste_buddies(possibles)
destination, introduce_me_to = self.get_most_similar(w_candidate)
- self.send_introduction_request(destination, introduce_me_to, request.allow_sync)
+ yield self.send_introduction_request(destination, introduce_me_to, request.allow_sync)
self.reply_packet_size += len(message.packet)
+ @inlineCallbacks
def send_introduction_request(self, destination, introduce_me_to=None, allow_sync=True):
assert isinstance(destination, WalkCandidate), [type(destination), destination]
assert not introduce_me_to or isinstance(introduce_me_to, str), type(introduce_me_to)
extra_payload = [introduce_me_to]
- super(DiscoveryCommunity, self).create_introduction_request(destination, allow_sync, extra_payload=extra_payload)
+ yield super(DiscoveryCommunity, self).create_introduction_request(destination, allow_sync, extra_payload=extra_payload)
self._logger.debug("DiscoveryCommunity: sending introduction-request to %s (%s,%s)", destination,
introduce_me_to.encode("HEX") if introduce_me_to else '', allow_sync)
+ @inlineCallbacks
def on_introduction_request(self, messages):
for message in messages:
introduce_me_to = ''
@@ -655,7 +666,7 @@ def on_introduction_request(self, messages):
ctb = self.is_taste_buddy(message.candidate)
self._logger.debug("Got intro request from %s %s", ctb, ctb.overlap if ctb else 0)
- rtb = self.get_tb_or_candidate_mid(message.payload.introduce_me_to)
+ rtb = yield self.get_tb_or_candidate_mid(message.payload.introduce_me_to)
if rtb:
self.requested_introductions[message.candidate.get_member().mid] = introduce_me_to = rtb
@@ -663,14 +674,16 @@ def on_introduction_request(self, messages):
message.payload.introduce_me_to.encode("HEX") if message.payload.introduce_me_to else '-',
introduce_me_to, self.requested_introductions)
- super(DiscoveryCommunity, self).on_introduction_request(messages)
+ yield super(DiscoveryCommunity, self).on_introduction_request(messages)
+ @inlineCallbacks
def get_tb_or_candidate_mid(self, mid):
tb = self.is_taste_buddy_mid(mid)
if tb:
- return tb.candidate
+ returnValue(tb.candidate)
- return self.get_candidate_mid(mid)
+ candidate_mid = yield self.get_candidate_mid(mid)
+ returnValue(candidate_mid)
def dispersy_get_introduce_candidate(self, exclude_candidate=None):
if exclude_candidate:
@@ -693,16 +706,18 @@ def on_timeout(self):
self._logger.debug("DiscoveryCommunity: no response on ping, removing from taste_buddies %s", self.requested_candidate)
self.community.remove_taste_buddy(self.requested_candidate)
+ @inlineCallbacks
def create_ping_requests(self):
tbs = list(self.yield_taste_buddies())[:self.max_tbs]
for tb in tbs:
if tb.time_remaining() < PING_INTERVAL:
cache = self._request_cache.add(DiscoveryCommunity.PingRequestCache(self, tb.candidate))
- self._create_pingpong(u"ping", tb.candidate, cache.number)
+ yield self._create_pingpong(u"ping", tb.candidate, cache.number)
+ @inlineCallbacks
def on_ping(self, messages):
for message in messages:
- self._create_pingpong(u"pong", message.candidate, message.payload.identifier)
+ yield self._create_pingpong(u"pong", message.candidate, message.payload.identifier)
self._logger.debug("DiscoveryCommunity: got ping from %s", message.candidate)
self.reset_taste_buddy(message.candidate)
@@ -731,10 +746,11 @@ def on_pong(self, messages):
self.reset_taste_buddy(message.candidate)
+ @inlineCallbacks
def _create_pingpong(self, meta_name, candidate, identifier):
meta = self.get_meta_message(meta_name)
- message = meta.impl(distribution=(self.global_time,), payload=(identifier,))
- self._dispersy._send([candidate, ], [message])
+ message = yield meta.impl(distribution=(self.global_time,), payload=(identifier,))
+ yield self._dispersy._send([candidate, ], [message])
self._logger.debug("DiscoveryCommunity: send %s to %s",
meta_name, str(candidate))
diff --git a/dispersy.py b/dispersy.py
index 4c4ce91f..4bd41762 100644
--- a/dispersy.py
+++ b/dispersy.py
@@ -40,7 +40,7 @@
import os
from collections import defaultdict, Iterable, OrderedDict
from hashlib import sha1
-from itertools import groupby, count
+from itertools import groupby, count, product
from pprint import pformat
from socket import inet_aton
from struct import unpack_from
@@ -48,7 +48,7 @@
import netifaces
from twisted.internet import reactor
-from twisted.internet.defer import maybeDeferred, gatherResults
+from twisted.internet.defer import maybeDeferred, gatherResults, inlineCallbacks, returnValue
from twisted.internet.task import LoopingCall
from twisted.python.failure import Failure
from twisted.python.threadable import isInIOThread
@@ -155,8 +155,11 @@ def __init__(self, endpoint, working_directory, database_filename=u"dispersy.db"
# progress handlers (used to notify the user when something will take a long time)
self._progress_handlers = []
+ @inlineCallbacks
+ def initialize_statistics(self):
# statistics...
self._statistics = DispersyStatistics(self)
+ yield self._statistics.initialize()
@staticmethod
def _get_interface_addresses():
@@ -373,6 +376,7 @@ def statistics(self):
"""
return self._statistics
+ @inlineCallbacks
def define_auto_load(self, community_cls, my_member, args=(), kargs=None, load=False):
"""
Tell Dispersy how to load COMMUNITY if need be.
@@ -401,15 +405,16 @@ def define_auto_load(self, community_cls, my_member, args=(), kargs=None, load=F
communities = []
if load:
- for master in community_cls.get_master_members(self):
+ master_members = yield community_cls.get_master_members(self)
+ for master in master_members:
if not master.mid in self._communities:
self._logger.debug("Loading %s at start", community_cls.get_classification())
- community = community_cls.init_community(self, master, my_member, *args, **kargs)
+ community = yield community_cls.init_community(self, master, my_member, *args, **kargs)
communities.append(community)
assert community.master_member.mid == master.mid
assert community.master_member.mid in self._communities
- return communities
+ returnValue(communities)
def undefine_auto_load(self, community):
"""
@@ -445,6 +450,7 @@ def detach_progress_handler(self, func):
def get_progress_handlers(self):
return self._progress_handlers
+ @inlineCallbacks
def get_member(self, mid="", public_key="", private_key=""):
"""Returns a Member instance associated with public_key.
@@ -477,7 +483,7 @@ def get_member(self, mid="", public_key="", private_key=""):
member = self._member_cache_by_hash.get(mid)
if member:
- return member
+ returnValue(member)
if private_key:
key = self.crypto.key_from_private_bin(private_key)
@@ -489,7 +495,9 @@ def get_member(self, mid="", public_key="", private_key=""):
# both public and private keys are valid at this point
# The member is not cached, let's try to get it from the database
- row = self.database.execute(u"SELECT id, public_key, private_key FROM member WHERE mid = ? LIMIT 1", (buffer(mid),)).fetchone()
+ row = yield self.database.stormdb.fetchone(
+ u"SELECT id, public_key, private_key FROM member WHERE mid = ? LIMIT 1",
+ (buffer(mid),))
if row:
database_id, public_key_from_db, private_key_from_db = row
@@ -501,8 +509,8 @@ def get_member(self, mid="", public_key="", private_key=""):
if private_key:
assert public_key
if private_key_from_db != private_key:
- self.database.execute(u"UPDATE member SET public_key = ?, private_key = ? WHERE id = ?",
- (buffer(public_key), buffer(private_key), database_id))
+ yield self.database.stormdb.execute(u"UPDATE member SET public_key = ?, private_key = ? WHERE id = ?",
+ (buffer(public_key), buffer(private_key), database_id))
else:
# the private key from the database overrules the public key argument
if private_key_from_db:
@@ -511,29 +519,29 @@ def get_member(self, mid="", public_key="", private_key=""):
# the public key argument overrules anything in the database
elif public_key:
if public_key_from_db != public_key:
- self.database.execute(u"UPDATE member SET public_key = ? WHERE id = ?",
- (buffer(public_key), database_id))
+ yield self.database.stormdb.execute(u"UPDATE member SET public_key = ? WHERE id = ?",
+ (buffer(public_key), database_id))
# no priv/pubkey arguments passed, maybe use the public key from the database
elif public_key_from_db:
key = self.crypto.key_from_public_bin(public_key_from_db)
else:
- return DummyMember(self, database_id, mid)
+ returnValue(DummyMember(self, database_id, mid))
# the member is not in the database, insert it
elif public_key or private_key:
if private_key:
assert public_key
# The MID or public/private keys are not in the database, store them.
- database_id = self.database.execute(
+ database_id = yield self.database.stormdb.execute(
u"INSERT INTO member (mid, public_key, private_key) VALUES (?, ?, ?)",
(buffer(mid), buffer(public_key), buffer(private_key)), get_lastrowid=True)
else:
# We could't find the key on the DB, nothing else to do
- database_id = self.database.execute(u"INSERT INTO member (mid) VALUES (?)",
- (buffer(mid),), get_lastrowid=True)
- return DummyMember(self, database_id, mid)
+ database_id = yield self.database.stormdb.execute(u"INSERT INTO member (mid) VALUES (?)",
+ (buffer(mid),), get_lastrowid=True)
+ returnValue(DummyMember(self, database_id, mid))
member = Member(self, key, database_id, mid)
@@ -544,16 +552,19 @@ def get_member(self, mid="", public_key="", private_key=""):
if len(self._member_cache_by_hash) > 1024:
self._member_cache_by_hash.popitem(False)
- return member
+ returnValue(member)
+ @inlineCallbacks
def get_new_member(self, securitylevel=u"medium"):
"""
Returns a Member instance created from a newly generated public key.
"""
assert isinstance(securitylevel, unicode), type(securitylevel)
key = self.crypto.generate_key(securitylevel)
- return self.get_member(private_key=self.crypto.key_to_bin(key))
+ member = yield self.get_member(private_key=self.crypto.key_to_bin(key))
+ returnValue(member)
+ @inlineCallbacks
def get_member_from_database_id(self, database_id):
"""
Returns a Member instance associated with DATABASE_ID or None when this row identifier is
@@ -561,11 +572,14 @@ def get_member_from_database_id(self, database_id):
"""
assert isinstance(database_id, (int, long)), type(database_id)
try:
- public_key, = next(self._database.execute(u"SELECT public_key FROM member WHERE id = ?", (database_id,)))
- return self.get_member(public_key=str(public_key))
- except StopIteration:
+ public_key, = yield self._database.stormdb.fetchone(u"SELECT public_key FROM member WHERE id = ?",
+ (database_id,))
+ member = yield self.get_member(public_key=str(public_key))
+ returnValue(member)
+ except TypeError:
pass
+ @inlineCallbacks
def reclassify_community(self, source, destination):
"""
Change a community classification.
@@ -604,7 +618,7 @@ def reclassify_community(self, source, destination):
master = source.master_member
source.unload_community()
- self._database.execute(u"UPDATE community SET classification = ? WHERE master = ?",
+ yield self._database.stormdb.execute(u"UPDATE community SET classification = ? WHERE master = ?",
(destination_classification, master.database_id))
if destination_classification in self._auto_load_communities:
@@ -612,14 +626,15 @@ def reclassify_community(self, source, destination):
assert cls == destination, [cls, destination]
else:
- my_member_did, = self._database.execute(u"SELECT member FROM community WHERE master = ?",
- (master.database_id,)).next()
+ my_member_did, = yield self._database.stormdb.fetchone(u"SELECT member FROM community WHERE master = ?",
+ (master.database_id,))
- my_member = self.get_member_from_database_id(my_member_did)
+ my_member = yield self.get_member_from_database_id(my_member_did)
args = ()
kargs = {}
- return destination.init_community(self, master, my_member, *args, **kargs)
+ community = yield destination.init_community(self, master, my_member, *args, **kargs)
+ returnValue(community)
def has_community(self, cid):
"""
@@ -627,6 +642,7 @@ def has_community(self, cid):
"""
return cid in self._communities
+ @inlineCallbacks
def get_community(self, cid, load=False, auto_load=True):
"""
Returns a community by its community id.
@@ -656,27 +672,31 @@ def get_community(self, cid, load=False, auto_load=True):
assert isinstance(auto_load, bool)
try:
- return self._communities[cid]
+ returnValue(self._communities[cid])
except KeyError:
if load or auto_load:
try:
# have we joined this community
- classification, auto_load_flag, master_public_key = self._database.execute(u"SELECT community.classification, community.auto_load, member.public_key FROM community JOIN member ON member.id = community.master WHERE mid = ?",
- (buffer(cid),)).next()
-
- except StopIteration:
+ classification, auto_load_flag, master_public_key = yield self._database.stormdb.fetchone(
+ u"""
+ SELECT community.classification, community.auto_load, member.public_key
+ FROM community
+ JOIN member ON member.id = community.master
+ WHERE mid = ?
+ """,
+ (buffer(cid),))
+ except TypeError:
pass
else:
if load or (auto_load and auto_load_flag):
-
if classification in self._auto_load_communities:
- master = self.get_member(public_key=str(master_public_key)) if master_public_key else self.get_member(mid=cid)
+ master = yield self.get_member(public_key=str(master_public_key)) if master_public_key else self.get_member(mid=cid)
cls, my_member, args, kargs = self._auto_load_communities[classification]
- community = cls.init_community(self, master, my_member, *args, **kargs)
+ community = yield cls.init_community(self, master, my_member, *args, **kargs)
assert master.mid in self._communities
- return community
+ returnValue(community)
else:
self._logger.warning("unable to auto load %s is an undefined classification [%s]",
@@ -693,6 +713,7 @@ def get_communities(self):
"""
return self._communities.values()
+ @inlineCallbacks
def get_message(self, community, member, global_time):
"""
Returns a Member.Implementation instance uniquely identified by its community, member, and
@@ -704,24 +725,30 @@ def get_message(self, community, member, global_time):
assert isinstance(member, Member)
assert isinstance(global_time, (int, long))
try:
- packet, = self._database.execute(u"SELECT packet FROM sync WHERE community = ? AND member = ? AND global_time = ?",
- (community.database_id, member.database_id, global_time)).next()
- except StopIteration:
- return None
+ packet, = yield self._database.stormdb.fetchone(
+ u"SELECT packet FROM sync WHERE community = ? AND member = ? AND global_time = ?",
+ (community.database_id, member.database_id, global_time))
+ except TypeError:
+ returnValue(None)
else:
- return self.convert_packet_to_message(str(packet), community)
+ message = yield self.convert_packet_to_message(str(packet), community)
+ returnValue(message)
+ @inlineCallbacks
+ # TODO(Laurens): This method is never called?
def get_last_message(self, community, member, meta):
assert isinstance(community, Community)
assert isinstance(member, Member)
assert isinstance(meta, Message)
try:
- packet, = self._database.execute(u"SELECT packet FROM sync WHERE member = ? AND meta_message = ? ORDER BY global_time DESC LIMIT 1",
- (member.database_id, meta.database_id)).next()
- except StopIteration:
- return None
+ packet, = yield self._database.stormdb.fetchone(
+ u"SELECT packet FROM sync WHERE member = ? AND meta_message = ? ORDER BY global_time DESC LIMIT 1",
+ (member.database_id, meta.database_id))
+ except TypeError:
+ returnValue(None)
else:
- return self.convert_packet_to_message(str(packet), community)
+ message = yield self.convert_packet_to_message(str(packet), community)
+ returnValue(message)
def wan_address_unvote(self, voter):
"""
@@ -846,6 +873,7 @@ def set_connection_type(connection_type):
else:
set_connection_type(u"unknown")
+ @inlineCallbacks
def _is_duplicate_sync_message(self, message):
"""
Returns True when this message is a duplicate, otherwise the message must be processed.
@@ -884,11 +912,11 @@ def _is_duplicate_sync_message(self, message):
community = message.community
# fetch the duplicate binary packet from the database
try:
- have_packet, undone = self._database.execute(u"SELECT packet, undone FROM sync WHERE community = ? AND member = ? AND global_time = ?",
- (community.database_id, message.authentication.member.database_id, message.distribution.global_time)).next()
- except StopIteration:
+ have_packet, undone = yield self._database.stormdb.fetchone(u"SELECT packet, undone FROM sync WHERE community = ? AND member = ? AND global_time = ?",
+ (community.database_id, message.authentication.member.database_id, message.distribution.global_time))
+ except TypeError:
self._logger.debug("this message is not a duplicate")
- return False
+ returnValue(False)
else:
have_packet = str(have_packet)
@@ -903,11 +931,11 @@ def _is_duplicate_sync_message(self, message):
if undone:
try:
- proof, = self._database.execute(u"SELECT packet FROM sync WHERE id = ?", (undone,)).next()
- except StopIteration:
+ proof, = yield self._database.stormdb.fetchone(u"SELECT packet FROM sync WHERE id = ?", (undone,))
+ except TypeError:
pass
else:
- self._send_packets([message.candidate], [str(proof)], community, "-caused by duplicate-undo-")
+ yield self._send_packets([message.candidate], [str(proof)], community, "-caused by duplicate-undo-")
else:
signature_length = message.authentication.member.signature_length
@@ -922,7 +950,7 @@ def _is_duplicate_sync_message(self, message):
if have_packet < message.packet:
# replace our current message with the other one
- self._database.execute(u"UPDATE sync SET packet = ? WHERE community = ? AND member = ? AND global_time = ?",
+ yield self._database.stormdb.execute(u"UPDATE sync SET packet = ? WHERE community = ? AND member = ? AND global_time = ?",
(buffer(message.packet), community.database_id, message.authentication.member.database_id, message.distribution.global_time))
# notify that global times have changed
@@ -933,9 +961,10 @@ def _is_duplicate_sync_message(self, message):
" possibly malicious behaviour", message.candidate)
# this message is a duplicate
- return True
+ returnValue(True)
- @attach_runtime_statistics(u"{0.__class__.__name__}._check_distribution full_sync")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}._check_distribution full_sync")
+ @inlineCallbacks
def _check_full_sync_distribution_batch(self, messages):
"""
Ensure that we do not yet have the messages and that, if sequence numbers are enabled, we
@@ -960,7 +989,9 @@ def _check_full_sync_distribution_batch(self, messages):
# a message is considered unique when (creator, global-time),
# i.e. (authentication.member.database_id, distribution.global_time), is unique.
unique = set()
- execute = self._database.execute
+ execute = self._database.stormdb.execute
+ fetchone = self._database.stormdb.fetchone
+
enable_sequence_number = messages[0].meta.distribution.enable_sequence_number
# sort the messages by their (1) global_time and (2) binary packet
@@ -969,29 +1000,30 @@ def _check_full_sync_distribution_batch(self, messages):
# refuse messages where the global time is unreasonably high
acceptable_global_time = messages[0].community.acceptable_global_time
+ return_list = []
if enable_sequence_number:
# obtain the highest sequence_number from the database
highest = {}
for message in messages:
if not message.authentication.member.database_id in highest:
- last_global_time, last_seq, count = execute(u"SELECT MAX(global_time), MAX(sequence), COUNT(*) FROM sync WHERE member = ? AND meta_message = ?",
- (message.authentication.member.database_id, message.database_id)).next()
+ last_global_time, last_seq, count = yield fetchone(u"SELECT MAX(global_time), MAX(sequence), COUNT(*) FROM sync WHERE member = ? AND meta_message = ?",
+ (message.authentication.member.database_id, message.database_id))
highest[message.authentication.member.database_id] = (last_global_time or 0, last_seq or 0)
assert last_seq or 0 == count, [last_seq, count, message.name]
# all messages must follow the sequence_number order
for message in messages:
if message.distribution.global_time > acceptable_global_time:
- yield DropMessage(message, "global time is not within acceptable range (%d, we accept %d)" % (message.distribution.global_time, acceptable_global_time))
+ return_list.append(DropMessage(message, "global time is not within acceptable range (%d, we accept %d)" % (message.distribution.global_time, acceptable_global_time)))
continue
if not message.distribution.pruning.is_active():
- yield DropMessage(message, "message has been pruned")
+ return_list.append(DropMessage(message, "message has been pruned"))
continue
key = (message.authentication.member.database_id, message.distribution.global_time)
if key in unique:
- yield DropMessage(message, "duplicate message by member^global_time (1)")
+ return_list.append(DropMessage(message, "duplicate message by member^global_time (1)"))
continue
unique.add(key)
@@ -1001,11 +1033,11 @@ def _check_full_sync_distribution_batch(self, messages):
# we already have this message (drop)
# fetch the corresponding packet from the database (it should be binary identical)
- global_time, packet = execute(u"SELECT global_time, packet FROM sync WHERE member = ? AND meta_message = ? ORDER BY global_time, packet LIMIT 1 OFFSET ?",
- (message.authentication.member.database_id, message.database_id, message.distribution.sequence_number - 1)).next()
+ global_time, packet = yield fetchone(u"SELECT global_time, packet FROM sync WHERE member = ? AND meta_message = ? ORDER BY global_time, packet LIMIT 1 OFFSET ?",
+ (message.authentication.member.database_id, message.database_id, message.distribution.sequence_number - 1))
packet = str(packet)
if message.packet == packet:
- yield DropMessage(message, "duplicate message by binary packet")
+ return_list.append(DropMessage(message, "duplicate message by binary packet"))
continue
else:
@@ -1014,73 +1046,77 @@ def _check_full_sync_distribution_batch(self, messages):
if (global_time, packet) < (message.distribution.global_time, message.packet):
# we keep PACKET (i.e. the message that we currently have in our database)
# reply with the packet to let the peer know
- self._send_packets([message.candidate], [packet],
+ yield self._send_packets([message.candidate], [packet],
message.community, "-caused by check_full_sync-")
- yield DropMessage(message, "duplicate message by sequence number (1)")
+ return_list.append(DropMessage(message, "duplicate message by sequence number (1)"))
continue
else:
# TODO we should undo the messages that we are about to remove (when applicable)
- execute(u"DELETE FROM sync WHERE member = ? AND meta_message = ? AND global_time >= ?",
+ yield execute(u"DELETE FROM sync WHERE member = ? AND meta_message = ? AND global_time >= ?",
(message.authentication.member.database_id, message.database_id, global_time))
# by deleting messages we changed SEQ and the HIGHEST cache
- last_global_time, last_seq, count = execute(u"SELECT MAX(global_time), MAX(sequence), COUNT(*) FROM sync WHERE member = ? AND meta_message = ?",
- (message.authentication.member.database_id, message.database_id)).next()
+ last_global_time, last_seq, count = yield fetchone(u"SELECT MAX(global_time), MAX(sequence), COUNT(*) FROM sync WHERE member = ? AND meta_message = ?",
+ (message.authentication.member.database_id, message.database_id))
highest[message.authentication.member.database_id] = (last_global_time or 0, last_seq or 0)
assert last_seq or 0 == count, [last_seq, count, message.name]
# we can allow MESSAGE to be processed
elif seq + 1 != message.distribution.sequence_number:
# we do not have the previous message (delay and request)
- yield DelayMessageBySequence(message, seq + 1, message.distribution.sequence_number - 1)
+ return_list.append(DelayMessageBySequence(message, seq + 1, message.distribution.sequence_number - 1))
continue
# we have the previous message, check for duplicates based on community,
# member, and global_time
- if self._is_duplicate_sync_message(message):
+ is_duplicate_sync_message = yield self._is_duplicate_sync_message(message)
+ if is_duplicate_sync_message:
# we have the previous message (drop)
- yield DropMessage(message, "duplicate message by global_time (1)")
+ return_list.append(DropMessage(message, "duplicate message by global_time (1)"))
continue
# ensure that MESSAGE.distribution.global_time > LAST_GLOBAL_TIME
if last_global_time and message.distribution.global_time <= last_global_time:
self._logger.debug("last_global_time: %d message @%d",
last_global_time, message.distribution.global_time)
- yield DropMessage(message, "higher sequence number with lower global time than most recent message")
+ return_list.append(DropMessage(message, "higher sequence number with lower global time than most recent message"))
continue
# we accept this message
highest[message.authentication.member.database_id] = (message.distribution.global_time, seq + 1)
- yield message
+ return_list.append(message)
else:
for message in messages:
if message.distribution.global_time > acceptable_global_time:
- yield DropMessage(message, "global time is not within acceptable range")
+ return_list.append(DropMessage(message, "global time is not within acceptable range"))
continue
if not message.distribution.pruning.is_active():
- yield DropMessage(message, "message has been pruned")
+ return_list.append(DropMessage(message, "message has been pruned"))
continue
key = (message.authentication.member.database_id, message.distribution.global_time)
if key in unique:
- yield DropMessage(message, "duplicate message by member^global_time (2)")
+ return_list.append(DropMessage(message, "duplicate message by member^global_time (2)"))
continue
unique.add(key)
# check for duplicates based on community, member, and global_time
- if self._is_duplicate_sync_message(message):
+ is_duplicate_sync_message = yield self._is_duplicate_sync_message(message)
+ if is_duplicate_sync_message:
# we have the previous message (drop)
- yield DropMessage(message, "duplicate message by global_time (2)")
+ return_list.append(DropMessage(message, "duplicate message by global_time (2)"))
continue
# we accept this message
- yield message
+ return_list.append(message)
+ returnValue(iter(return_list))
- @attach_runtime_statistics(u"{0.__class__.__name__}._check_distribution last_sync")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}._check_distribution last_sync")
+ @inlineCallbacks
def _check_last_sync_distribution_batch(self, messages):
"""
Check that the messages do not violate any database consistency rules.
@@ -1117,6 +1153,7 @@ def _check_last_sync_distribution_batch(self, messages):
assert all(message.meta == messages[0].meta for message in messages)
assert all(isinstance(message.authentication, (MemberAuthentication.Implementation, DoubleMemberAuthentication.Implementation)) for message in messages)
+ @inlineCallbacks
def check_member_and_global_time(unique, times, message):
"""
The member + global_time combination must always be unique in the database
@@ -1128,19 +1165,21 @@ def check_member_and_global_time(unique, times, message):
key = (message.authentication.member.database_id, message.distribution.global_time)
if key in unique:
- return DropMessage(message, "already processed message by member^global_time")
+ returnValue(DropMessage(message, "already processed message by member^global_time"))
else:
unique.add(key)
if not message.authentication.member.database_id in times:
- times[message.authentication.member.database_id] = [global_time for global_time, in self._database.execute(u"SELECT global_time FROM sync WHERE community = ? AND member = ? AND meta_message = ?",
- (message.community.database_id, message.authentication.member.database_id, message.database_id))]
+ sync_packets = yield self._database.stormdb.fetchall(u"SELECT global_time FROM sync WHERE community = ? AND member = ? AND meta_message = ?",
+ (message.community.database_id, message.authentication.member.database_id, message.database_id))
+ times[message.authentication.member.database_id] = [global_time for global_time, in sync_packets]
assert len(times[message.authentication.member.database_id]) <= message.distribution.history_size, [message.packet_id, message.distribution.history_size, times[message.authentication.member.database_id]]
tim = times[message.authentication.member.database_id]
- if message.distribution.global_time in tim and self._is_duplicate_sync_message(message):
- return DropMessage(message, "duplicate message by member^global_time (3)")
+ is_duplicate_sync_message = yield self._is_duplicate_sync_message(message)
+ if message.distribution.global_time in tim and is_duplicate_sync_message:
+ returnValue(DropMessage(message, "duplicate message by member^global_time (3)"))
elif len(tim) >= message.distribution.history_size and min(tim) > message.distribution.global_time:
# we have newer messages (drop)
@@ -1149,23 +1188,24 @@ def check_member_and_global_time(unique, times, message):
# apparently the sender does not have this message yet
if message.distribution.history_size == 1:
try:
- packet, = self._database.execute(u"SELECT packet FROM sync WHERE community = ? AND member = ? ORDER BY global_time DESC LIMIT 1",
- (message.community.database_id, message.authentication.member.database_id)).next()
- except StopIteration:
+ packet, = yield self._database.stormdb.fetchone(u"SELECT packet FROM sync WHERE community = ? AND member = ? ORDER BY global_time DESC LIMIT 1",
+ (message.community.database_id, message.authentication.member.database_id))
+ except TypeError:
# TODO can still fail when packet is in one of the received messages
# from this batch.
pass
else:
- self._send_packets([message.candidate], [str(packet)],
+ yield self._send_packets([message.candidate], [str(packet)],
message.community, "-caused by check_last_sync:check_member-")
- return DropMessage(message, "old message by member^global_time")
+ returnValue(DropMessage(message, "old message by member^global_time"))
else:
# we accept this message
tim.append(message.distribution.global_time)
- return message
+ returnValue(message)
+ @inlineCallbacks
def check_double_member_and_global_time(unique, times, message):
"""
No other message may exist with this message.authentication.members / global_time
@@ -1181,7 +1221,7 @@ def check_double_member_and_global_time(unique, times, message):
self._logger.debug("drop %s %d@%d (in unique)",
message.name, message.authentication.member.database_id,
message.distribution.global_time)
- return DropMessage(message, "already processed message by member^global_time")
+ returnValue(DropMessage(message, "already processed message by member^global_time"))
else:
unique.add(key)
@@ -1191,31 +1231,33 @@ def check_double_member_and_global_time(unique, times, message):
if key in unique:
self._logger.debug("drop %s %s@%d (in unique)",
message.name, members, message.distribution.global_time)
- return DropMessage(message, "already processed message by members^global_time")
+ returnValue(DropMessage(message, "already processed message by members^global_time"))
else:
unique.add(key)
-
- if self._is_duplicate_sync_message(message):
+ is_duplicate_sync_message = yield self._is_duplicate_sync_message(message)
+ if is_duplicate_sync_message:
# we have the previous message (drop)
self._logger.debug("drop %s %s@%d (_is_duplicate_sync_message)",
message.name, members, message.distribution.global_time)
- return DropMessage(message, "duplicate message by member^global_time (4)")
+ returnValue(DropMessage(message, "duplicate message by member^global_time (4)"))
if not members in times:
# the next query obtains a list with all global times that we have in the
# database for all message.meta messages that were signed by
# message.authentication.members where the order of signing is not taken
# into account.
+ sync_packets = yield self._database.stormdb.fetchall(u"""
+ SELECT sync.global_time, sync.id, sync.packet
+ FROM sync
+ JOIN double_signed_sync ON double_signed_sync.sync = sync.id
+ WHERE sync.meta_message = ? AND double_signed_sync.member1 = ?
+ AND double_signed_sync.member2 = ?
+ """, (message.database_id,) + members)
+
times[members] = dict((global_time, (packet_id, str(packet)))
for global_time, packet_id, packet
- in self._database.execute(u"""
-SELECT sync.global_time, sync.id, sync.packet
-FROM sync
-JOIN double_signed_sync ON double_signed_sync.sync = sync.id
-WHERE sync.meta_message = ? AND double_signed_sync.member1 = ? AND double_signed_sync.member2 = ?
-""",
- (message.database_id,) + members))
+ in sync_packets)
assert len(times[members]) <= message.distribution.history_size, [len(times[members]),
message.distribution.history_size]
tim = times[members]
@@ -1230,7 +1272,7 @@ def check_double_member_and_global_time(unique, times, message):
members,
message.distribution.global_time,
message.candidate)
- return DropMessage(message, "duplicate message by binary packet (1)")
+ returnValue(DropMessage(message, "duplicate message by binary packet (1)"))
else:
signature_length = sum(member.signature_length for member in message.authentication.members)
@@ -1246,18 +1288,18 @@ def check_double_member_and_global_time(unique, times, message):
if have_packet < message.packet:
# replace our current message with the other one
- self._database.execute(u"UPDATE sync SET member = ?, packet = ? WHERE id = ?",
+ yield self._database.stormdb.execute(u"UPDATE sync SET member = ?, packet = ? WHERE id = ?",
(message.authentication.member.database_id, buffer(message.packet), packet_id))
- return DropMessage(message, "replaced existing packet with other packet with the same payload")
+ returnValue(DropMessage(message, "replaced existing packet with other packet with the same payload"))
- return DropMessage(message, "not replacing existing packet with other packet with the same payload")
+ returnValue(DropMessage(message, "not replacing existing packet with other packet with the same payload"))
else:
self._logger.warning("received message with duplicate community/members/global-time"
" triplet from %s. possibly malicious behavior",
message.candidate)
- return DropMessage(message, "duplicate message by binary packet (2)")
+ returnValue(DropMessage(message, "duplicate message by binary packet (2)"))
elif len(tim) >= message.distribution.history_size and min(tim) > message.distribution.global_time:
# we have newer messages (drop)
@@ -1266,18 +1308,18 @@ def check_double_member_and_global_time(unique, times, message):
# apparently the sender does not have this message yet
if message.distribution.history_size == 1:
packet_id, have_packet = tim.values()[0]
- self._send_packets([message.candidate], [have_packet],
+ yield self._send_packets([message.candidate], [have_packet],
message.community, "-caused by check_last_sync:check_double_member-")
self._logger.debug("drop %s %s@%d (older than %s)",
message.name, members, message.distribution.global_time, min(tim))
- return DropMessage(message, "old message by members^global_time")
+ returnValue(DropMessage(message, "old message by members^global_time"))
else:
# we accept this message
self._logger.debug("accept %s %s@%d", message.name, members, message.distribution.global_time)
tim[message.distribution.global_time] = (0, message.packet)
- return message
+ returnValue(message)
# meta message
meta = messages[0].meta
@@ -1305,7 +1347,14 @@ def check_double_member_and_global_time(unique, times, message):
# function
unique = set()
times = {}
- messages = [message if isinstance(message, DropMessage) else check_member_and_global_time(unique, times, message) for message in messages]
+ new_messages = []
+ for message in messages:
+ if isinstance(message, DropMessage):
+ new_messages.append(message)
+ else:
+ m = yield check_member_and_global_time(unique, times, message)
+ new_messages.append(m)
+ messages = new_messages
# instead of storing HISTORY_SIZE messages for each authentication.member, we will store
# HISTORY_SIZE messages for each combination of authentication.members.
@@ -1313,11 +1362,18 @@ def check_double_member_and_global_time(unique, times, message):
assert isinstance(meta.authentication, DoubleMemberAuthentication)
unique = set()
times = {}
- messages = [message if isinstance(message, DropMessage) else check_double_member_and_global_time(unique, times, message) for message in messages]
+ new_messages = []
+ for message in messages:
+ if isinstance(message, DropMessage):
+ new_messages.append(message)
+ else:
+ m = yield check_double_member_and_global_time(unique, times, message)
+ new_messages.append(m)
+ messages = new_messages
- return messages
+ returnValue(messages)
- @attach_runtime_statistics(u"{0.__class__.__name__}._check_distribution direct")
+ #@attach_runtime_statistics(u"{0.__class__.__name__}._check_distribution direct")
def _check_direct_distribution_batch(self, messages):
"""
Returns the messages in the correct processing order.
@@ -1350,6 +1406,7 @@ def _check_direct_distribution_batch(self, messages):
return messages
+ @inlineCallbacks
def load_message(self, community, member, global_time, verify=False):
"""
Returns the message identified by community, member, and global_time.
@@ -1364,17 +1421,19 @@ def load_message(self, community, member, global_time, verify=False):
assert isinstance(global_time, (int, long)), type(global_time)
try:
- packet_id, packet, undone = self._database.execute(u"SELECT id, packet, undone FROM sync WHERE community = ? AND member = ? AND global_time = ? LIMIT 1",
- (community.database_id, member.database_id, global_time)).next()
- except StopIteration:
- return None
+ packet_id, packet, undone = yield self._database.stormdb.fetchone(
+ u"SELECT id, packet, undone FROM sync WHERE community = ? AND member = ? AND global_time = ? LIMIT 1",
+ (community.database_id, member.database_id, global_time))
+ except TypeError:
+ returnValue(None)
- message = self.convert_packet_to_message(str(packet), community, verify=verify)
+ message = yield self.convert_packet_to_message(str(packet), community, verify=verify)
if message:
message.packet_id = packet_id
message.undone = undone
- return message
+ returnValue(message)
+ @inlineCallbacks
def load_message_by_packetid(self, community, packet_id, verify=False):
"""
Returns the message identified by community, member, and global_time.
@@ -1388,17 +1447,18 @@ def load_message_by_packetid(self, community, packet_id, verify=False):
assert isinstance(packet_id, (int, long)), type(packet_id)
try:
- packet, undone = self._database.execute(u"SELECT packet, undone FROM sync WHERE id = ?",
- (packet_id,)).next()
- except StopIteration:
- return None
+ packet, undone = yield self._database.stormdb.fetchone(u"SELECT packet, undone FROM sync WHERE id = ?",
+ (packet_id,))
+ except TypeError:
+ returnValue(None)
- message = self.convert_packet_to_message(str(packet), community, verify=verify)
+ message = yield self.convert_packet_to_message(str(packet), community, verify=verify)
if message:
message.packet_id = packet_id
message.undone = undone
- return message
+ returnValue(message)
+ @inlineCallbacks
def convert_packet_to_message(self, packet, community=None, load=True, auto_load=True, candidate=None, verify=True):
"""
Returns the Message.Implementation representing the packet or None when no conversion is
@@ -1413,11 +1473,12 @@ def convert_packet_to_message(self, packet, community=None, load=True, auto_load
# find associated community
try:
if not community:
- community = self.get_community(packet[2:22], load, auto_load)
+ community = yield self.get_community(packet[2:22], load, auto_load)
# find associated conversion
conversion = community.get_conversion_for_packet(packet)
- return conversion.decode_message(LoopbackCandidate() if candidate is None else candidate, packet, verify)
+ decoded_message = yield conversion.decode_message(LoopbackCandidate() if candidate is None else candidate, packet, verify)
+ returnValue(decoded_message)
except CommunityNotFoundException:
self._logger.warning("unable to convert a %d byte packet (unknown community)", len(packet))
@@ -1425,8 +1486,9 @@ def convert_packet_to_message(self, packet, community=None, load=True, auto_load
self._logger.warning("unable to convert a %d byte packet (unknown conversion)", len(packet))
except (DropPacket, DelayPacket) as exception:
self._logger.warning("unable to convert a %d byte packet (%s)", len(packet), exception)
- return None
+ returnValue(None)
+ @inlineCallbacks
def convert_packets_to_messages(self, packets, community=None, load=True, auto_load=True, candidate=None, verify=True):
"""
Returns a list with messages representing each packet or None when no conversion is
@@ -1434,8 +1496,13 @@ def convert_packets_to_messages(self, packets, community=None, load=True, auto_l
"""
assert isinstance(packets, Iterable), type(packets)
assert all(isinstance(packet, str) for packet in packets), [type(packet) for packet in packets]
- return [self.convert_packet_to_message(packet, community, load, auto_load, candidate, verify) for packet in packets]
+ return_messages = []
+ for packet in packets:
+ message = yield self.convert_packet_to_message(packet, community, load, auto_load, candidate, verify)
+ return_messages.append(message)
+ returnValue(return_messages)
+ @inlineCallbacks
def on_incoming_packets(self, packets, cache=True, timestamp=0.0, source=u"unknown"):
"""
Process incoming UDP packets.
@@ -1477,8 +1544,8 @@ def on_incoming_packets(self, packets, cache=True, timestamp=0.0, source=u"unkno
for community_id, iterator in groupby(sorted(packets, key=sort_key), key=groupby_key):
# find associated community
try:
- community = self.get_community(community_id)
- community.on_incoming_packets(list(iterator), cache, timestamp, source)
+ community = yield self.get_community(community_id)
+ yield community.on_incoming_packets(list(iterator), cache, timestamp, source)
except CommunityNotFoundException:
packets = list(iterator)
@@ -1490,7 +1557,8 @@ def on_incoming_packets(self, packets, cache=True, timestamp=0.0, source=u"unkno
else:
self._logger.info("dropping %d packets as dispersy is not running", len(packets))
- @attach_runtime_statistics(u"Dispersy.{function_name} {1[0].name}")
+ #@attach_runtime_statistics(u"Dispersy.{function_name} {1[0].name}")
+ @inlineCallbacks
def _store(self, messages):
"""
Store a message in the database.
@@ -1536,7 +1604,7 @@ def _store(self, messages):
message.authentication.member.database_id, message.distribution.global_time)
# add packet to database
- message.packet_id = self._database.execute(
+ message.packet_id = yield self._database.stormdb.execute(
u"INSERT INTO sync (community, member, global_time, meta_message, packet, sequence) "
u"VALUES (?, ?, ?, ?, ?, ?)",
(message.community.database_id,
@@ -1555,8 +1623,12 @@ def _store(self, messages):
if is_double_member_authentication:
member1 = message.authentication.members[0].database_id
member2 = message.authentication.members[1].database_id
- self._database.execute(u"INSERT INTO double_signed_sync (sync, member1, member2) VALUES (?, ?, ?)",
- (message.packet_id, member1, member2) if member1 < member2 else (message.packet_id, member2, member1))
+ if member1 < member2:
+ yield self._database.stormdb.insert(u"double_signed_sync", sync=message.packet_id, member1=member1,
+ member2=member2)
+ else:
+ yield self._database.stormdb.insert(u"double_signed_sync", sync=message.packet_id, member1=member2,
+ member2=member1)
# update global time
highest_global_time = max(highest_global_time, message.distribution.global_time)
@@ -1568,9 +1640,9 @@ def _store(self, messages):
# when sequence numbers are enabled, we must have exactly
# message.distribution.sequence_number messages in the database
for member_id, max_sequence_number in highest_sequence_number.iteritems():
- count_, = self._database.execute(u"SELECT COUNT(*) FROM sync "
+ count_, = yield self._database.stormdb.fetchone(u"SELECT COUNT(*) FROM sync "
u"WHERE meta_message = ? AND member = ? AND sequence BETWEEN 1 AND ?",
- (message.database_id, member_id, max_sequence_number)).next()
+ (message.database_id, member_id, max_sequence_number))
assert count_ == max_sequence_number, [count_, max_sequence_number]
if isinstance(meta.distribution, LastSyncDistribution):
@@ -1586,30 +1658,30 @@ def _store(self, messages):
order = lambda member1, member2: (member1, member2) if member1 < member2 else (member2, member1)
for member1, member2 in set(order(message.authentication.members[0].database_id, message.authentication.members[1].database_id) for message in messages):
assert member1 < member2, [member1, member2]
- all_items = list(self._database.execute(u"""
+ all_items = yield self._database.stormdb.fetchall(u"""
SELECT sync.id, sync.global_time
FROM sync
JOIN double_signed_sync ON double_signed_sync.sync = sync.id
WHERE sync.meta_message = ? AND double_signed_sync.member1 = ? AND double_signed_sync.member2 = ?
-ORDER BY sync.global_time, sync.packet""", (meta.database_id, member1, member2)))
+ORDER BY sync.global_time, sync.packet""", (meta.database_id, member1, member2))
if len(all_items) > meta.distribution.history_size:
items.update(all_items[:len(all_items) - meta.distribution.history_size])
else:
for member_database_id in set(message.authentication.member.database_id for message in messages):
- all_items = list(self._database.execute(u"""
+ all_items = yield self._database.stormdb.fetchall(u"""
SELECT id, global_time
FROM sync
WHERE meta_message = ? AND member = ?
-ORDER BY global_time""", (meta.database_id, member_database_id)))
+ORDER BY global_time""", (meta.database_id, member_database_id))
if len(all_items) > meta.distribution.history_size:
items.update(all_items[:len(all_items) - meta.distribution.history_size])
if items:
- self._database.executemany(u"DELETE FROM sync WHERE id = ?", [(syncid,) for syncid, _ in items])
+ yield self._database.stormdb.executemany(u"DELETE FROM sync WHERE id = ?", [(syncid,) for syncid, _ in items])
if is_double_member_authentication:
- self._database.executemany(u"DELETE FROM double_signed_sync WHERE sync = ?", [(syncid,) for syncid, _ in items])
+ yield self._database.stormdb.executemany(u"DELETE FROM double_signed_sync WHERE sync = ?", [(syncid,) for syncid, _ in items])
# update_sync_range.update(global_time for _, _, global_time in items)
@@ -1617,11 +1689,11 @@ def _store(self, messages):
if __debug__:
if not is_double_member_authentication and meta.distribution.custom_callback is None:
for message in messages:
- history_size, = self._database.execute(u"SELECT COUNT(*) FROM sync WHERE meta_message = ? AND member = ?", (message.database_id, message.authentication.member.database_id)).next()
+ history_size, = yield self._database.stormdb.fetchone(u"SELECT COUNT(*) FROM sync WHERE meta_message = ? AND member = ?", (message.database_id, message.authentication.member.database_id))
assert history_size <= message.distribution.history_size, [history_size, message.distribution.history_size, message.authentication.member.database_id]
# update the global time
- meta.community.update_global_time(highest_global_time)
+ yield meta.community.update_global_time(highest_global_time)
meta.community.dispersy_store(messages)
@@ -1657,6 +1729,7 @@ def estimate_lan_and_wan_addresses(self, sock_addr, lan_address, wan_address):
return lan_address, wan_address
# TODO(emilon): Now that we have removed the malicious behaviour stuff, maybe we could be a bit more relaxed with the DB syncing?
+ @inlineCallbacks
def store_update_forward(self, possibly_messages, store, update, forward):
"""
Usually we need to do three things when we have a valid messages: (1) store it in our local
@@ -1711,11 +1784,12 @@ def store_update_forward(self, possibly_messages, store, update, forward):
store = store and isinstance(messages[0].meta.distribution, SyncDistribution)
if store:
- self._store(messages)
+ yield self._store(messages)
if update:
- if self._update(possibly_messages) == False:
- return False
+ update_res = yield self._update(possibly_messages)
+ if update_res == False:
+ returnValue(False)
# 07/10/11 Boudewijn: we will only commit if it the message was create by our self.
# Otherwise we can safely skip the commit overhead, since, if a crash occurs, we will be
@@ -1724,30 +1798,33 @@ def store_update_forward(self, possibly_messages, store, update, forward):
my_messages = sum(message.authentication.member == message.community.my_member for message in messages)
if my_messages:
self._logger.debug("commit user generated message")
- self._database.commit()
+ self._database.stormdb.commit()
messages[0].community.statistics.increase_msg_count(u"created", messages[0].meta.name, my_messages)
if forward:
- return self._forward(messages)
+ forward_result = yield self._forward(messages)
+ returnValue(forward_result)
- return True
+ returnValue(True)
- @attach_runtime_statistics(u"Dispersy.{function_name} {1[0].name}")
+ #@attach_runtime_statistics(u"Dispersy.{function_name} {1[0].name}")
+ @inlineCallbacks
def _update(self, messages):
"""
Call the handle callback of a list of messages of the same type.
"""
try:
- messages[0].handle_callback(messages)
- return True
+ yield messages[0].handle_callback(messages)
+ returnValue(True)
except (SystemExit, KeyboardInterrupt, GeneratorExit, AssertionError):
raise
- except:
+ except Exception:
self._logger.exception("exception during handle_callback for %s", messages[0].name)
- return False
+ returnValue(False)
- @attach_runtime_statistics(u"Dispersy.{function_name} {1[0].name}")
+ #@attach_runtime_statistics(u"Dispersy.{function_name} {1[0].name}")
+ @inlineCallbacks
def _forward(self, messages):
"""
Queue a sequence of messages to be sent to other members.
@@ -1786,12 +1863,14 @@ def _forward(self, messages):
candidates.add(candidate)
else:
break
- result = result and self._send(tuple(candidates), [message])
+ send_result = yield self._send(tuple(candidates), [message])
+ result = result and send_result
else:
raise NotImplementedError(meta.destination)
- return result
+ returnValue(result)
+ @inlineCallbacks
def _delay(self, delay, packet, candidate):
for key in delay.match_info:
assert len(key) == 5, key
@@ -1805,11 +1884,12 @@ def _delay(self, delay, packet, candidate):
try:
- community = self.get_community(key[0], load=False, auto_load=False)
- community._delay(key[1:], delay, packet, candidate)
+ community = yield self.get_community(key[0], load=False, auto_load=False)
+ yield community._delay(key[1:], delay, packet, candidate)
except CommunityNotFoundException:
self._logger.error('Messages can only be delayed for loaded communities.')
+ @inlineCallbacks
def _send(self, candidates, messages):
"""
Send a list of messages to a list of candidates. If no candidates are specified or endpoint reported
@@ -1832,7 +1912,7 @@ def _send(self, candidates, messages):
messages_send = False
if len(candidates) and len(messages):
packets = [message.packet for message in messages]
- messages_send = self._endpoint.send(candidates, packets)
+ messages_send = yield self._endpoint.send(candidates, packets)
if messages_send:
for message in messages:
@@ -1848,14 +1928,16 @@ def _send(self, candidates, messages):
message.community.statistics.increase_msg_count(
u"outgoing", message.meta.name, len(candidates))
- return messages_send
+ returnValue(messages_send)
+ @inlineCallbacks
def _send_packets(self, candidates, packets, community, msg_type):
"""A wrap method to use send() in endpoint.
"""
- self._endpoint.send(candidates, packets)
+ yield self._endpoint.send(candidates, packets)
community.statistics.increase_msg_count(u"outgoing", msg_type, len(candidates) * len(packets))
+ @inlineCallbacks
def sanity_check(self, community, test_identity=True, test_undo_other=True, test_binary=False, test_sequence_number=True, test_last_sync=True):
"""
Check everything we can about a community.
@@ -1869,12 +1951,14 @@ def sanity_check(self, community, test_identity=True, test_undo_other=True, test
- check sequence numbers for FullSyncDistribution
- check history size for LastSyncDistribution
"""
- def select(sql, bindings):
+
+ @inlineCallbacks
+ def fetchall(sql, bindings):
assert isinstance(sql, unicode)
assert isinstance(bindings, tuple)
limit = 1000
for offset in (i * limit for i in count()):
- rows = list(self._database.execute(sql, bindings + (limit, offset)))
+ rows = yield self._database.stormdb.fetchall(sql, bindings + (limit, offset))
if rows:
for row in rows:
yield row
@@ -1890,21 +1974,21 @@ def select(sql, bindings):
meta_identity = community.get_meta_message(u"dispersy-identity")
try:
- member_id, = self._database.execute(u"SELECT id FROM member WHERE mid = ?", (buffer(community.my_member.mid),)).next()
- except StopIteration:
+ member_id, = yield self._database.stormdb.fetchone(u"SELECT id FROM member WHERE mid = ?", (buffer(community.my_member.mid),))
+ except TypeError:
raise ValueError("unable to find the public key for my member")
if not member_id == community.my_member.database_id:
raise ValueError("my member's database id is invalid", member_id, community.my_member.database_id)
try:
- self._database.execute(u"SELECT 1 FROM member WHERE id = ? AND private_key IS NOT NULL", (member_id,)).next()
- except StopIteration:
+ _, = yield self._database.stormdb.fetchone(u"SELECT 1 FROM member WHERE id = ? AND private_key IS NOT NULL", (member_id,))
+ except TypeError:
raise ValueError("unable to find the private key for my member")
try:
- self._database.execute(u"SELECT 1 FROM sync WHERE member = ? AND meta_message = ?", (member_id, meta_identity.database_id)).next()
- except StopIteration:
+ _, = yield self._database.stormdb.fetchone(u"SELECT 1 FROM sync WHERE member = ? AND meta_message = ?", (member_id, meta_identity.database_id))
+ except TypeError:
raise ValueError("unable to find the dispersy-identity message for my member")
self._logger.debug("my identity is OK")
@@ -1913,8 +1997,10 @@ def select(sql, bindings):
# the dispersy-identity must be in the database for each member that has one or more
# messages in the database
#
- A = set(id_ for id_, in self._database.execute(u"SELECT member FROM sync WHERE community = ? GROUP BY member", (community.database_id,)))
- B = set(id_ for id_, in self._database.execute(u"SELECT member FROM sync WHERE meta_message = ?", (meta_identity.database_id,)))
+ a_raw = yield fetchall(u"SELECT member FROM sync WHERE community = ? GROUP BY member", (community.database_id,))
+ A = set(id_ for id_, in a_raw)
+ b_raw = yield fetchall(u"SELECT member FROM sync WHERE meta_message = ?", (meta_identity.database_id,))
+ B = set(id_ for id_, in b_raw)
if not len(A) == len(B):
raise ValueError("inconsistent dispersy-identity messages.", A.difference(B))
@@ -1928,22 +2014,27 @@ def select(sql, bindings):
meta_undo_other = community.get_meta_message(u"dispersy-undo-other")
# TODO we are not taking into account that undo messages can be undone
- for undo_packet_id, undo_packet_global_time, undo_packet in select(u"SELECT id, global_time, packet FROM sync WHERE community = ? AND meta_message = ? ORDER BY id LIMIT ? OFFSET ?", (community.database_id, meta_undo_other.database_id)):
+ sync_packets = yield fetchall(u"SELECT id, global_time, packet FROM sync WHERE community = ? AND meta_message = ? ORDER BY id LIMIT ? OFFSET ?", (community.database_id, meta_undo_other.database_id))
+ for undo_packet_id, undo_packet_global_time, undo_packet in sync_packets:
undo_packet = str(undo_packet)
- undo_message = self.convert_packet_to_message(undo_packet, community, verify=False)
+ undo_message = yield self.convert_packet_to_message(undo_packet, community, verify=False)
# 10/10/12 Boudewijn: the check_callback is required to obtain the
# message.payload.packet
- for _ in undo_message.check_callback([undo_message]):
+ undo_message_res = yield undo_message.check_callback([undo_message])
+ for _ in undo_message_res:
pass
# get the message that undo_message refers to
try:
- packet, undone = self._database.execute(u"SELECT packet, undone FROM sync WHERE community = ? AND member = ? AND global_time = ?", (community.database_id, undo_message.payload.member.database_id, undo_message.payload.global_time)).next()
- except StopIteration:
+ packet, undone = yield self._database.stormdb.fetchone(
+ u"SELECT packet, undone FROM sync WHERE community = ? AND member = ? AND global_time = ?",
+ (community.database_id, undo_message.payload.member.database_id,
+ undo_message.payload.global_time))
+ except TypeError:
raise ValueError("found dispersy-undo-other but not the message that it refers to")
packet = str(packet)
- message = self.convert_packet_to_message(packet, community, verify=False)
+ message = yield self.convert_packet_to_message(packet, community, verify=False)
if not undone:
raise ValueError("found dispersy-undo-other but the message that it refers to is not undone")
@@ -1976,10 +2067,11 @@ def select(sql, bindings):
# ensure all packets in the database are valid and that the binary packets are consistent
# with the information stored in the database
#
- for packet_id, member_id, global_time, meta_message_id, packet in select(u"SELECT id, member, global_time, meta_message, packet FROM sync WHERE community = ? ORDER BY id LIMIT ? OFFSET ?", (community.database_id,)):
+ sync_packets = yield fetchall(u"SELECT id, member, global_time, meta_message, packet FROM sync WHERE community = ? ORDER BY id LIMIT ? OFFSET ?", (community.database_id,))
+ for packet_id, member_id, global_time, meta_message_id, packet in sync_packets:
if meta_message_id in enabled_messages:
packet = str(packet)
- message = self.convert_packet_to_message(packet, community, verify=True)
+ message = yield self.convert_packet_to_message(packet, community, verify=True)
if not message:
raise ValueError("unable to convert packet ", packet_id, "@", global_time, " to message")
@@ -2010,9 +2102,10 @@ def select(sql, bindings):
counter = 0
counter_member_id = 0
exception = None
- for packet_id, member_id, packet in select(u"SELECT id, member, packet FROM sync WHERE meta_message = ? ORDER BY member, global_time LIMIT ? OFFSET ?", (meta.database_id,)):
+ sync_packets = yield fetchall(u"SELECT id, member, packet FROM sync WHERE meta_message = ? ORDER BY member, global_time LIMIT ? OFFSET ?", (meta.database_id,))
+ for packet_id, member_id, packet in sync_packets:
packet = str(packet)
- message = self.convert_packet_to_message(packet, community, verify=False)
+ message = yield self.convert_packet_to_message(packet, community, verify=False)
assert message
if member_id != counter_member_id:
@@ -2044,8 +2137,9 @@ def select(sql, bindings):
if isinstance(meta.authentication, MemberAuthentication):
counter = 0
counter_member_id = 0
- for packet_id, member_id, packet in select(u"SELECT id, member, packet FROM sync WHERE meta_message = ? ORDER BY member ASC, global_time DESC LIMIT ? OFFSET ?", (meta.database_id,)):
- message = self.convert_packet_to_message(str(packet), community, verify=False)
+ sync_packets = yield fetchall(u"SELECT id, member, packet FROM sync WHERE meta_message = ? ORDER BY member ASC, global_time DESC LIMIT ? OFFSET ?", (meta.database_id,))
+ for packet_id, member_id, packet in sync_packets:
+ message = yield self.convert_packet_to_message(str(packet), community, verify=False)
assert message
if member_id == counter_member_id:
@@ -2061,13 +2155,14 @@ def select(sql, bindings):
else:
assert isinstance(meta.authentication, DoubleMemberAuthentication)
- for packet_id, member_id, packet in select(u"SELECT id, member, packet FROM sync WHERE meta_message = ? ORDER BY member ASC, global_time DESC LIMIT ? OFFSET ?", (meta.database_id,)):
- message = self.convert_packet_to_message(str(packet), community, verify=False)
+ sync_packets = yield fetchall(u"SELECT id, member, packet FROM sync WHERE meta_message = ? ORDER BY member ASC, global_time DESC LIMIT ? OFFSET ?", (meta.database_id,))
+ for packet_id, member_id, packet in sync_packets:
+ message = yield self.convert_packet_to_message(str(packet), community, verify=False)
assert message
try:
- member1, member2 = self._database.execute(u"SELECT member1, member2 FROM double_signed_sync WHERE sync = ?", (packet_id,)).next()
- except StopIteration:
+ member1, member2 = yield self._database.stormdb.fetchone(u"SELECT member1, member2 FROM double_signed_sync WHERE sync = ?", (packet_id,))
+ except TypeError:
raise ValueError("found double signed message without an entry in the double_signed_sync table")
if not member1 < member2:
@@ -2086,7 +2181,7 @@ def _flush_database(self):
"""
try:
# flush changes to disk every 1 minutes
- self._database.commit()
+ self._database.stormdb.commit()
except Exception as exception:
# OperationalError: database is locked
@@ -2094,6 +2189,7 @@ def _flush_database(self):
# TODO(emilon): Shouldn't start() just raise an exception if something goes wrong?, that would clean up a lot of cruft
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def start(self, autoload_discovery=True):
"""
Starts Dispersy.
@@ -2114,7 +2210,8 @@ def start(self, autoload_discovery=True):
assert all(isinstance(result, bool) for _, result in results), [type(result) for _, result in results]
- results.append((u"database", self._database.open()))
+ result_open_db = yield self._database.open()
+ results.append((u"database", result_open_db))
assert all(isinstance(result, bool) for _, result in results), [type(result) for _, result in results]
results.append((u"endpoint", self._endpoint.open(self)))
@@ -2138,15 +2235,19 @@ def start(self, autoload_discovery=True):
self._logger.info("Dispersy core loading DiscoveryCommunity")
# TODO: pass None instead of new member, let community decide if we need a new member or not.
- self._discovery_community = self.define_auto_load(DiscoveryCommunity, self.get_new_member(), load=True)[0]
- return True
+ new_dispersy_member = yield self.get_new_member()
+ auto_load_result = yield self.define_auto_load(DiscoveryCommunity, new_dispersy_member, load=True)
+ self._discovery_community = auto_load_result[0]
+ returnValue(True)
else:
self._logger.error("Dispersy core unable to start all components [%s]",
", ".join("{0}:{1}".format(key, value) for key, value in results))
- return False
+ returnValue(False)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
+ # TODO(Laurens): check callers
def stop(self, timeout=10.0):
"""
Stops Dispersy.
@@ -2173,37 +2274,37 @@ def stop(self, timeout=10.0):
self.cancel_all_pending_tasks()
+ @inlineCallbacks
def unload_communities(communities):
for community in communities:
if community.cid in self._communities:
self._logger.debug("Unloading %s (the reactor has %s delayed calls scheduled)",
community, len(reactor.getDelayedCalls()))
- community.unload_community()
+ yield community.unload_community()
self._logger.debug("Unloaded %s (the reactor has %s delayed calls scheduled now)",
community, len(reactor.getDelayedCalls()))
else:
self._logger.warning("Attempting to unload %s which is not loaded", community)
+ results = {}
+
self._logger.info('Stopping Dispersy Core..')
if os.environ.get("DISPERSY_PRINT_STATISTICS", "False").lower() == "true":
# output statistics before we stop
if self._logger.isEnabledFor(logging.DEBUG):
- self._statistics.update()
+ results[u"statistics"] = self._statistics.update()
self._logger.debug("\n%s", pformat(self._statistics.get_dict(), width=120))
_runtime_statistics.clear()
- self._logger.info("stopping the Dispersy core...")
- results = {}
-
# unload communities that are not defined
- unload_communities([community
+ yield unload_communities([community
for community
in self._communities.itervalues()
if not community.get_classification() in self._auto_load_communities])
# unload communities in reverse auto load order
for classification in reversed(self._auto_load_communities):
- unload_communities([community
+ yield unload_communities([community
for community
in self._communities.itervalues()
if community.get_classification() == classification])
@@ -2212,8 +2313,8 @@ def unload_communities(communities):
# stop endpoint
results[u"endpoint"] = maybeDeferred(self._endpoint.close, timeout)
- # stop the database
- results[u"database"] = maybeDeferred(self._database.close)
+ # stop the database + stormdb
+ results[u"database"] = maybeDeferred(self._database.stormdb.close)
def check_stop_status(return_values):
failures = []
@@ -2226,7 +2327,8 @@ def check_stop_status(return_values):
return False
return True
- return gatherResults(results.values(), consumeErrors=True).addBoth(check_stop_status)
+ res = yield gatherResults(results.values(), consumeErrors=True).addBoth(check_stop_status)
+ returnValue(res)
def _stats_detailed_candidates(self):
"""
diff --git a/dispersydatabase.py b/dispersydatabase.py
index 7b5cc51a..877d1a13 100644
--- a/dispersydatabase.py
+++ b/dispersydatabase.py
@@ -8,70 +8,74 @@
from itertools import groupby
+from twisted.internet.defer import inlineCallbacks, returnValue
+
from .database import Database
from .distribution import FullSyncDistribution
-
LATEST_VERSION = 21
-schema = u"""
-CREATE TABLE member(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- mid BLOB, -- member identifier (sha1 of public_key)
- public_key BLOB, -- member public key
- private_key BLOB); -- member private key
-CREATE INDEX member_mid_index ON member(mid);
-
-CREATE TABLE community(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- master INTEGER REFERENCES member(id), -- master member (permission tree root)
- member INTEGER REFERENCES member(id), -- my member (used to sign messages)
- classification TEXT, -- community type, typically the class name
- auto_load BOOL DEFAULT 1, -- when 1 this community is loaded whenever a packet for it is received
- database_version INTEGER DEFAULT """ + str(LATEST_VERSION) + """,
- UNIQUE(master));
-
-CREATE TABLE meta_message(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- community INTEGER REFERENCES community(id),
- name TEXT,
- priority INTEGER DEFAULT 128,
- direction INTEGER DEFAULT 1, -- direction used when synching (1 for ASC, -1 for DESC)
- UNIQUE(community, name));
-
---CREATE TABLE reference_member_sync(
--- member INTEGER REFERENCES member(id),
--- sync INTEGER REFERENCES sync(id),
--- UNIQUE(member, sync));
-
-CREATE TABLE double_signed_sync(
- sync INTEGER REFERENCES sync(id),
- member1 INTEGER REFERENCES member(id),
- member2 INTEGER REFERENCES member(id));
-CREATE INDEX double_signed_sync_index_0 ON double_signed_sync(member1, member2);
-
-CREATE TABLE sync(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- community INTEGER REFERENCES community(id),
- member INTEGER REFERENCES member(id), -- the creator of the message
- global_time INTEGER,
- meta_message INTEGER REFERENCES meta_message(id),
- undone INTEGER DEFAULT 0,
- packet BLOB,
- sequence INTEGER,
- UNIQUE(community, member, global_time));
-CREATE INDEX sync_meta_message_undone_global_time_index ON sync(meta_message, undone, global_time);
-CREATE INDEX sync_meta_message_member ON sync(meta_message, member);
-
-CREATE TABLE option(key TEXT PRIMARY KEY, value BLOB);
-INSERT INTO option(key, value) VALUES('database_version', '""" + str(LATEST_VERSION) + """');
-"""
+schema = [
+ u"""
+ CREATE TABLE member(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ mid BLOB, -- member identifier (sha1 of public_key)
+ public_key BLOB, -- member public key
+ private_key BLOB); -- member private key
+ """,
+
+ u"""CREATE INDEX member_mid_index ON member(mid);""",
+ u"""CREATE TABLE community(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ master INTEGER REFERENCES member(id), -- master member (permission tree root)
+ member INTEGER REFERENCES member(id), -- my member (used to sign messages)
+ classification TEXT, -- community type, typically the class name
+ auto_load BOOL DEFAULT 1, -- when 1 this community is loaded whenever a packet for it is received
+ database_version INTEGER DEFAULT """ + str(LATEST_VERSION) + u""",
+ UNIQUE(master));""",
+
+ u"""CREATE TABLE meta_message(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ community INTEGER REFERENCES community(id),
+ name TEXT,
+ priority INTEGER DEFAULT 128,
+ direction INTEGER DEFAULT 1, -- direction used when synching (1 for ASC, -1 for DESC)
+ UNIQUE(community, name));""",
+
+ u"""CREATE TABLE double_signed_sync(
+ sync INTEGER REFERENCES sync(id),
+ member1 INTEGER REFERENCES member(id),
+ member2 INTEGER REFERENCES member(id));""",
+
+ u"""CREATE INDEX double_signed_sync_index_0 ON double_signed_sync(member1, member2);""",
+
+ u"""CREATE TABLE sync(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ community INTEGER REFERENCES community(id),
+ member INTEGER REFERENCES member(id), -- the creator of the message
+ global_time INTEGER,
+ meta_message INTEGER REFERENCES meta_message(id),
+ undone INTEGER DEFAULT 0,
+ packet BLOB,
+ sequence INTEGER,
+ UNIQUE(community, member, global_time));""",
+
+ u"""CREATE INDEX sync_meta_message_undone_global_time_index ON sync(meta_message, undone, global_time);""",
+
+ u"""CREATE INDEX sync_meta_message_member ON sync(meta_message, member);""",
+
+ u"""CREATE TABLE option(key TEXT PRIMARY KEY, value BLOB);""",
+
+ U"""INSERT INTO option(key, value) VALUES('database_version', '""" + str(LATEST_VERSION) + u"""');"""
+
+]
class DispersyDatabase(Database):
if __debug__:
__doc__ = schema
+ @inlineCallbacks
def check_database(self, database_version):
assert isinstance(database_version, unicode)
assert database_version.isdigit()
@@ -80,259 +84,16 @@ def check_database(self, database_version):
if database_version == 0:
# setup new database with current database_version
- self.executescript(schema)
- self.commit()
+ yield self.stormdb.executescript(schema)
else:
- # upgrade an older version
-
- # upgrade from version 1 to version 2
- if database_version < 2:
- self.executescript(u"""
-ALTER TABLE sync ADD COLUMN priority INTEGER DEFAULT 128;
-UPDATE option SET value = '2' WHERE key = 'database_version';
-""")
- self.commit()
-
- # upgrade from version 2 to version 3
- if database_version < 3:
- self.executescript(u"""
-CREATE TABLE malicious_proof(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- community INTEGER REFERENCES community(id),
- user INTEGER REFERENCES name(id),
- packet BLOB);
-ALTER TABLE sync ADD COLUMN undone BOOL DEFAULT 0;
-UPDATE tag SET value = 'blacklist' WHERE key = 4;
-UPDATE option SET value = '3' WHERE key = 'database_version';
-""")
- self.commit()
-
- # upgrade from version 3 to version 4
- if database_version < 4:
- self.executescript(u"""
--- create new tables
-
-CREATE TABLE member(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- mid BLOB,
- public_key BLOB,
- tags TEXT DEFAULT '',
- UNIQUE(public_key));
-CREATE INDEX member_mid_index ON member(mid);
-
-CREATE TABLE identity(
- community INTEGER REFERENCES community(id),
- member INTEGER REFERENCES member(id),
- host TEXT DEFAULT '',
- port INTEGER DEFAULT -1,
- PRIMARY KEY(community, member));
-
-CREATE TABLE private_key(
- member INTEGER PRIMARY KEY REFERENCES member(id),
- private_key BLOB);
-
-CREATE TABLE new_community(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- master INTEGER REFERENCES member(id),
- member INTEGER REFERENCES member(id),
- classification TEXT,
- auto_load BOOL DEFAULT 1,
- UNIQUE(master));
-
-CREATE TABLE new_reference_member_sync(
- member INTEGER REFERENCES member(id),
- sync INTEGER REFERENCES sync(id),
- UNIQUE(member, sync));
-
-CREATE TABLE meta_message(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- community INTEGER REFERENCES community(id),
- name TEXT,
- cluster INTEGER DEFAULT 0,
- priority INTEGER DEFAULT 128,
- direction INTEGER DEFAULT 1,
- UNIQUE(community, name));
-
-CREATE TABLE new_sync(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- community INTEGER REFERENCES community(id),
- member INTEGER REFERENCES member(id),
- global_time INTEGER,
- meta_message INTEGER REFERENCES meta_message(id),
- undone BOOL DEFAULT 0,
- packet BLOB,
- UNIQUE(community, member, global_time));
-CREATE INDEX sync_meta_message_index ON new_sync(meta_message);
-
-CREATE TABLE new_malicious_proof(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- community INTEGER REFERENCES community(id),
- member INTEGER REFERENCES name(id),
- packet BLOB);
-
--- populate new tables
-
--- no tags have ever been set outside debugging hence we do not upgrade those
-INSERT INTO member (id, mid, public_key) SELECT id, mid, public_key FROM user;
-INSERT INTO identity (community, member, host, port) SELECT community.id, user.id, user.host, user.port FROM community JOIN user;
-INSERT INTO private_key (member, private_key) SELECT member.id, key.private_key FROM key JOIN member ON member.public_key = key.public_key;
-INSERT INTO new_community (id, member, master, classification, auto_load) SELECT community.id, community.user, user.id, community.classification, community.auto_load FROM community JOIN user ON user.mid = community.cid;
-INSERT INTO new_reference_member_sync (member, sync) SELECT user, sync FROM reference_user_sync;
-INSERT INTO new_malicious_proof (id, community, member, packet) SELECT id, community, user, packet FROM malicious_proof ;
-""")
-
- # copy all data from sync and name into new_sync and meta_message
- meta_messages = {}
- for id, community, name, user, global_time, synchronization_direction, distribution_sequence, destination_cluster, packet, priority, undone in list(self.execute(u"SELECT sync.id, sync.community, name.value, sync.user, sync.global_time, sync.synchronization_direction, sync.distribution_sequence, sync.destination_cluster, sync.packet, sync.priority, sync.undone FROM sync JOIN name ON name.id = sync.name")):
-
- # get or create meta_message id
- key = (community, name)
- if not key in meta_messages:
- direction = -1 if synchronization_direction == 2 else 1
- meta_messages[key] = self.execute(u"INSERT INTO meta_message (community, name, cluster, priority, direction) VALUES (?, ?, ?, ?, ?)",
- (community, name, destination_cluster, priority, direction),
- get_lastrowid=True)
- meta_message = meta_messages[key]
-
- self.execute(u"INSERT INTO new_sync (community, member, global_time, meta_message, undone, packet) VALUES (?, ?, ?, ?, ?, ?)",
- (community, user, global_time, meta_message, undone, packet))
-
- self.executescript(u"""
--- drop old tables and entries
-
-DROP TABLE community;
-DROP TABLE key;
-DROP TABLE malicious_proof;
-DROP TABLE name;
-DROP TABLE reference_user_sync;
-DROP TABLE sync;
-DROP TABLE tag;
-DROP TABLE user;
-
--- rename replacement tables
-
-ALTER TABLE new_community RENAME TO community;
-ALTER TABLE new_reference_member_sync RENAME TO reference_member_sync;
-ALTER TABLE new_sync RENAME TO sync;
-ALTER TABLE new_malicious_proof RENAME TO malicious_proof;
-
--- update database version
-UPDATE option SET value = '4' WHERE key = 'database_version';
-""")
- self.commit()
-
- # upgrade from version 4 to version 5
- if database_version < 5:
- self.executescript(u"""
-DROP TABLE candidate;
-UPDATE option SET value = '5' WHERE key = 'database_version';
-""")
- self.commit()
-
- # upgrade from version 5 to version 6
- if database_version < 6:
- self.executescript(u"""
-DROP TABLE identity;
-UPDATE option SET value = '6' WHERE key = 'database_version';
-""")
- self.commit()
-
- # upgrade from version 6 to version 7
- if database_version < 7:
- self.executescript(u"""
-DROP INDEX sync_meta_message_index;
-CREATE INDEX sync_meta_message_global_time_index ON sync(meta_message, global_time);
-UPDATE option SET value = '7' WHERE key = 'database_version';
-""")
- self.commit()
-
- # upgrade from version 7 to version 8
- if database_version < 8:
- self._logger.debug("upgrade database %d -> %d", database_version, 8)
- self.executescript(u"""
-ALTER TABLE community ADD COLUMN database_version INTEGER DEFAULT 0;
-UPDATE option SET value = '8' WHERE key = 'database_version';
-""")
- self._logger.debug("upgrade database %d -> %d (done)", database_version, 8)
- self.commit()
-
- # upgrade from version 8 to version 9
- if database_version < 9:
- self._logger.debug("upgrade database %d -> %d", database_version, 9)
- self.executescript(u"""
-DROP INDEX IF EXISTS sync_meta_message_global_time_index;
-CREATE INDEX IF NOT EXISTS sync_global_time_undone_meta_message_index ON sync(global_time, undone, meta_message);
-UPDATE option SET value = '9' WHERE key = 'database_version';
-""")
- self._logger.debug("upgrade database %d -> %d (done)", database_version, 9)
- self.commit()
-
- # upgrade from version 9 to version 10
- if database_version < 10:
- self._logger.debug("upgrade database %d -> %d", database_version, 10)
- self.executescript(u"""
-DELETE FROM option WHERE key = 'my_wan_ip';
-DELETE FROM option WHERE key = 'my_wan_port';
-UPDATE option SET value = '10' WHERE key = 'database_version';
-""")
- self.commit()
- self._logger.debug("upgrade database %d -> %d (done)", database_version, 10)
-
- # upgrade from version 10 to version 11
- if database_version < 11:
- self._logger.debug("upgrade database %d -> %d", database_version, 11)
- # unfortunately the default SCHEMA did not contain
- # sync_global_time_undone_meta_message_index but was still using
- # sync_meta_message_global_time_index in database version 10
- self.executescript(u"""
-DROP INDEX IF EXISTS sync_meta_message_global_time_index;
-DROP INDEX IF EXISTS sync_global_time_undone_meta_message_index;
-CREATE INDEX sync_meta_message_undone_global_time_index ON sync(meta_message, undone, global_time);
-UPDATE option SET value = '11' WHERE key = 'database_version';
-""")
- self.commit()
- self._logger.debug("upgrade database %d -> %d (done)", database_version, 11)
-
- # upgrade from version 11 to version 12
- if database_version < 12:
- # according to the profiler the dispersy/member.py:201(has_identity) has a
- # disproportionally long runtime. this is easily improved using the below index.
- self._logger.debug("upgrade database %d -> %d", database_version, 12)
- self.executescript(u"""
-CREATE INDEX sync_meta_message_member ON sync(meta_message, member);
-UPDATE option SET value = '12' WHERE key = 'database_version';
-""")
- self.commit()
- self._logger.debug("upgrade database %d -> %d (done)", database_version, 12)
-
- # upgrade from version 12 to version 13
- if database_version < 13:
- self._logger.debug("upgrade database %d -> %d", database_version, 13)
- # reference_member_sync is a very generic but also expensive way to store
- # multi-sighned messages. by simplifying the milti-sign into purely double-sign we
- # can use a less expensive (in terms of query time) table. note: we simply drop the
- # table, we assume that there is no data in there since no release has been made
- # that uses the multi-sign feature
- self.executescript(u"""
-DROP TABLE reference_member_sync;
-CREATE TABLE double_signed_sync(
- sync INTEGER REFERENCES sync(id),
- member1 INTEGER REFERENCES member(id),
- member2 INTEGER REFERENCES member(id));
-CREATE INDEX double_signed_sync_index_0 ON double_signed_sync(member1, member2);
-UPDATE option SET value = '13' WHERE key = 'database_version';
-""")
- self.commit()
- self._logger.debug("upgrade database %d -> %d (done)", database_version, 13)
-
- # upgrade from version 13 to version 16
+ # Check if the version is not higher than the latest version this Dispersy covers.
+ if database_version > LATEST_VERSION:
+ raise RuntimeError(u"Your database version exceeds the latest version.")
+
+ # Check if the version is below what we support.
if database_version < 16:
- self._logger.debug("upgrade database %d -> %d", database_version, 16)
- # only effects check_community_database
- self.executescript(u"""UPDATE option SET value = '16' WHERE key = 'database_version';""")
- self.commit()
- self._logger.debug("upgrade database %d -> %d (done)", database_version, 16)
+ raise RuntimeError(u"Database version too low to upgrade.")
# upgrade from version 16 to version 17
if database_version < 17:
@@ -342,51 +103,61 @@ def check_database(self, database_version):
# Member instances. unfortunately this requires the removal of the UNIQUE clause,
# however, the python code already guarantees that the public_key remains unique.
self._logger.info("upgrade database %d -> %d", database_version, 17)
- self.executescript(u"""
--- move / remove old member table
-DROP INDEX IF EXISTS member_mid_index;
-ALTER TABLE member RENAME TO old_member;
--- create new member table
-CREATE TABLE member(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- mid BLOB, -- member identifier (sha1 of public_key)
- public_key BLOB, -- member public key
- tags TEXT DEFAULT ''); -- comma separated tags: store, ignore, and blacklist
-CREATE INDEX member_mid_index ON member(mid);
--- fill new member table with old data
-INSERT INTO member (id, mid, public_key, tags) SELECT id, mid, public_key, tags FROM old_member;
--- remove old member table
-DROP TABLE old_member;
--- update database version
-UPDATE option SET value = '17' WHERE key = 'database_version';
-""")
- self.commit()
+ yield self.stormdb.executescript([
+ u"""-- move / remove old member table
+ DROP INDEX IF EXISTS member_mid_index;""",
+
+ u"""ALTER TABLE member RENAME TO old_member;""",
+
+ u"""-- create new member table
+ CREATE TABLE member(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ mid BLOB, -- member identifier (sha1 of public_key)
+ public_key BLOB, -- member public key
+ tags TEXT DEFAULT ''); -- comma separated tags: store, ignore, and blacklist""",
+ u"""CREATE INDEX member_mid_index ON member(mid);""",
+
+ u"""-- fill new member table with old data
+ INSERT INTO member (id, mid, public_key, tags) SELECT id, mid, public_key, tags FROM old_member;""",
+
+ u"""-- remove old member table
+ DROP TABLE old_member;""",
+
+ u"""-- update database version
+ UPDATE option SET value = '17' WHERE key = 'database_version';"""
+ ])
self._logger.info("upgrade database %d -> %d (done)", database_version, 17)
# upgrade from version 17 to version 18
if database_version < 18:
# In version 18, we remove the tags column as we don't have blackisting anymore
self._logger.debug("upgrade database %d -> %d", database_version, 18)
- self.executescript(u"""
--- move / remove old member table
-DROP INDEX IF EXISTS member_mid_index;
-ALTER TABLE member RENAME TO old_member;
--- create new member table
-CREATE TABLE member(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- mid BLOB, -- member identifier (sha1 of public_key)
- public_key BLOB); -- member public key
-CREATE INDEX member_mid_index ON member(mid);
--- fill new member table with old data
-INSERT INTO member (id, mid, public_key) SELECT id, mid, public_key FROM old_member;
--- remove old member table
-DROP TABLE old_member;
--- remove table malicious_proof
-DROP TABLE IF EXISTS malicious_proof;
--- update database version
-UPDATE option SET value = '18' WHERE key = 'database_version';
-""")
- self.commit()
+ yield self.stormdb.executescript([
+ u"""-- move / remove old member table
+ DROP INDEX IF EXISTS member_mid_index;""",
+
+ u"""ALTER TABLE member RENAME TO old_member;""",
+
+ u"""-- create new member table
+ CREATE TABLE member(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ mid BLOB, -- member identifier (sha1 of public_key)
+ public_key BLOB); -- member public key""",
+
+ u"""CREATE INDEX member_mid_index ON member(mid);""",
+
+ u"""-- fill new member table with old data
+ INSERT INTO member (id, mid, public_key) SELECT id, mid, public_key FROM old_member;""",
+
+ u"""-- remove old member table
+ DROP TABLE old_member;""",
+
+ u"""-- remove table malicious_proof
+ DROP TABLE IF EXISTS malicious_proof;""",
+
+ u"""-- update database version
+ UPDATE option SET value = '18' WHERE key = 'database_version';""",
+ ])
self._logger.debug("upgrade database %d -> %d (done)", database_version, 18)
# upgrade from version 18 to version 19
@@ -395,197 +166,124 @@ def check_database(self, database_version):
# actually simplify the code.
self._logger.debug("upgrade database %d -> %d", database_version, 19)
- self.executescript(u"""
--- move / remove old member table
-DROP INDEX IF EXISTS member_mid_index;
-ALTER TABLE member RENAME TO old_member;
--- create new member table
- CREATE TABLE member(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- mid BLOB, -- member identifier (sha1 of public_key)
- public_key BLOB, -- member public key
- private_key BLOB); -- member private key
-CREATE INDEX member_mid_index ON member(mid);
--- fill new member table with old data
-INSERT INTO member (id, mid, public_key, private_key)
- SELECT id, mid, public_key, private_key.private_key FROM old_member
- LEFT JOIN private_key ON private_key.member = old_member.id;
--- remove old member table
-DROP TABLE old_member;
--- remove table private_key
-DROP TABLE IF EXISTS private_key;
--- update database version
-UPDATE option SET value = '19' WHERE key = 'database_version';
-""")
- self.commit()
+ yield self.stormdb.executescript([
+ u"""-- move / remove old member table
+ DROP INDEX IF EXISTS member_mid_index;""",
+
+ u"""ALTER TABLE member RENAME TO old_member;""",
+
+ u"""-- create new member table
+ CREATE TABLE member(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ mid BLOB, -- member identifier (sha1 of public_key)
+ public_key BLOB, -- member public key
+ private_key BLOB); -- member private key""",
+
+ u"""CREATE INDEX member_mid_index ON member(mid);""",
+
+ u"""-- fill new member table with old data
+ INSERT INTO member (id, mid, public_key, private_key)
+ SELECT id, mid, public_key, private_key.private_key FROM old_member
+ LEFT JOIN private_key ON private_key.member = old_member.id;""",
+
+ u"""-- remove old member table
+ DROP TABLE old_member;""",
+
+ u"""-- remove table private_key
+ DROP TABLE IF EXISTS private_key;""",
+
+ u"""-- update database version
+ UPDATE option SET value = '19' WHERE key = 'database_version';"""
+ ])
self._logger.debug("upgrade database %d -> %d (done)", database_version, 19)
- new_db_version = 20
- if database_version < new_db_version:
+ # Upgrade from 19 to 20
+ if database_version < 20:
# Let's store the sequence numbers in the database instead of quessing
- self._logger.debug("upgrade database %d -> %d", database_version, new_db_version)
-
- self.executescript(u"""
-DROP INDEX IF EXISTS sync_meta_message_undone_global_time_index;
-DROP INDEX IF EXISTS sync_meta_message_member;
-""")
- old_sync = list(self.execute(u"""
- SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'old_sync';"""))
+ self._logger.debug("upgrade database %d -> %d", database_version, 20)
+
+ yield self.stormdb.executescript([
+ u"""DROP INDEX IF EXISTS sync_meta_message_undone_global_time_index;""",
+
+ u"""DROP INDEX IF EXISTS sync_meta_message_member;"""
+ ])
+
+ old_sync = yield self.stormdb.fetchall(u"""
+ SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'old_sync';""")
if old_sync:
# delete the sync table and start copying data again
- self.executescript(u"""
-DROP TABLE IF EXISTS sync;
-DROP INDEX IF EXISTS sync_meta_message_undone_global_time_index;
-DROP INDEX IF EXISTS sync_meta_message_member;
-""")
+ yield self.stormdb.executescript([
+ u"""DROP TABLE IF EXISTS sync;""",
+
+ u"""DROP INDEX IF EXISTS sync_meta_message_undone_global_time_index;""",
+
+ u"""DROP INDEX IF EXISTS sync_meta_message_member;"""
+ ])
else:
# rename sync to old_sync if it is the first time
- self.executescript(u"ALTER TABLE sync RENAME TO old_sync;")
-
- self.executescript(u"""
-CREATE TABLE IF NOT EXISTS sync(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- community INTEGER REFERENCES community(id),
- member INTEGER REFERENCES member(id), -- the creator of the message
- global_time INTEGER,
- meta_message INTEGER REFERENCES meta_message(id),
- undone INTEGER DEFAULT 0,
- packet BLOB,
- sequence INTEGER,
- UNIQUE(community, member, global_time, sequence));
-
-CREATE INDEX sync_meta_message_undone_global_time_index ON sync(meta_message, undone, global_time);
-CREATE INDEX sync_meta_message_member ON sync(meta_message, member);
-
-INSERT INTO sync (id, community, member, global_time, meta_message, undone, packet, sequence)
- SELECT id, community, member, global_time, meta_message, undone, packet, NULL from old_sync;
-
-DROP TABLE IF EXISTS old_sync;
-
-UPDATE option SET value = '20' WHERE key = 'database_version';
-""")
- self.commit()
- self._logger.debug("upgrade database %d -> %d (done)", database_version, new_db_version)
-
- new_db_version = 21
- if database_version < new_db_version:
+ yield self.stormdb.execute(u"ALTER TABLE sync RENAME TO old_sync;")
+
+ yield self.stormdb.executescript([
+ u"""CREATE TABLE IF NOT EXISTS sync(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ community INTEGER REFERENCES community(id),
+ member INTEGER REFERENCES member(id), -- the creator of the message
+ global_time INTEGER,
+ meta_message INTEGER REFERENCES meta_message(id),
+ undone INTEGER DEFAULT 0,
+ packet BLOB,
+ sequence INTEGER,
+ UNIQUE(community, member, global_time, sequence));
+ """,
+
+ u"""
+ CREATE INDEX sync_meta_message_undone_global_time_index
+ ON sync(meta_message, undone, global_time);
+ """,
+
+ u"""CREATE INDEX sync_meta_message_member ON sync(meta_message, member);""",
+
+ u"""
+ INSERT INTO sync (id, community, member, global_time, meta_message, undone, packet, sequence)
+ SELECT id, community, member, global_time, meta_message, undone, packet, NULL FROM old_sync;
+ """,
+
+ u"""DROP TABLE IF EXISTS old_sync;""",
+
+ u"""UPDATE option SET value = '20' WHERE key = 'database_version';"""
+ ])
+ self._logger.debug("upgrade database %d -> %d (done)", database_version, 20)
+
+ # Upgrade from 20 to 21
+ if database_version < 21:
# remove 'cluster' column from meta_message table
- self._logger.debug("upgrade database %d -> %d", database_version, new_db_version)
- self.executescript(u"""
-CREATE TABLE meta_message_new(
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- community INTEGER REFERENCES community(id),
- name TEXT,
- priority INTEGER DEFAULT 128,
- direction INTEGER DEFAULT 1, -- direction used when synching (1 for ASC, -1 for DESC)
- UNIQUE(community, name));
-
-INSERT INTO meta_message_new(id, community, name, priority, direction)
- SELECT id, community, name, priority, direction FROM meta_message ORDER BY id;
-
-DROP TABLE meta_message;
-ALTER TABLE meta_message_new RENAME TO meta_message;
-
-UPDATE option SET value = '21' WHERE key = 'database_version';""")
- self.commit()
- self._logger.debug("upgrade database %d -> %d (done)", database_version, new_db_version)
-
- new_db_version = 22
- if database_version < new_db_version:
- # there is no version new_db_version yet...
- # self._logger.debug("upgrade database %d -> %d", database_version, new_db_version)
- # self.executescript(u"""UPDATE option SET value = '22' WHERE key = 'database_version';""")
- # self.commit()
- # self._logger.debug("upgrade database %d -> %d (done)", database_version, new_db_version)
- pass
-
- return LATEST_VERSION
-
- def check_community_database(self, community, database_version):
- assert isinstance(database_version, int)
- assert database_version >= 0
-
- if database_version < 8:
- self._logger.debug("upgrade community %d -> %d", database_version, 8)
-
- # patch notes:
- #
- # - the undone column in the sync table is not a boolean anymore. instead it points to
- # the row id of one of the associated dispersy-undo-own or dispersy-undo-other
- # messages
- #
- # - we know that Dispersy.create_undo has been called while the member did not have
- # permission to do so. hence, invalid dispersy-undo-other messages have been stored
- # in the local database, causing problems with the sync. these need to be removed
- #
- updates = []
- deletes = []
- redoes = []
- convert_packet_to_message = community.dispersy.convert_packet_to_message
- undo_own_meta = community.get_meta_message(u"dispersy-undo-own")
- undo_other_meta = community.get_meta_message(u"dispersy-undo-other")
+ self._logger.debug("upgrade database %d -> %d", database_version, 21)
+ yield self.stormdb.executescript([
+ u"""CREATE TABLE meta_message_new(
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ community INTEGER REFERENCES community(id),
+ name TEXT,
+ priority INTEGER DEFAULT 128,
+ direction INTEGER DEFAULT 1, -- direction used when synching (1 for ASC, -1 for DESC)
+ UNIQUE(community, name));""",
- progress = 0
- count, = self.execute(u"SELECT COUNT(1) FROM sync WHERE meta_message = ? OR meta_message = ?", (undo_own_meta.database_id, undo_other_meta.database_id)).next()
- self._logger.debug("upgrading %d undo messages", count)
- if count > 50:
- progress_handlers = [handler("Upgrading database", "Please wait while we upgrade the database", count) for handler in community.dispersy.get_progress_handlers()]
- else:
- progress_handlers = []
+ u"""INSERT INTO meta_message_new(id, community, name, priority, direction)
+ SELECT id, community, name, priority, direction FROM meta_message ORDER BY id;""",
- for packet_id, packet in list(self.execute(u"SELECT id, packet FROM sync WHERE meta_message = ?", (undo_own_meta.database_id,))):
- message = convert_packet_to_message(str(packet), community, verify=False)
- if message:
- # 12/09/12 Boudewijn: the check_callback is required to obtain the
- # message.payload.packet
- for _ in message.check_callback([message]):
- pass
- updates.append((packet_id, message.payload.packet.packet_id))
-
- progress += 1
- for handler in progress_handlers:
- handler.Update(progress)
-
- for packet_id, packet in list(self.execute(u"SELECT id, packet FROM sync WHERE meta_message = ?", (undo_other_meta.database_id,))):
- message = convert_packet_to_message(str(packet), community, verify=False)
- if message:
- # 12/09/12 Boudewijn: the check_callback is required to obtain the
- # message.payload.packet
- for _ in message.check_callback([message]):
- pass
- allowed, _ = community._timeline.check(message)
- if allowed:
- updates.append((packet_id, message.payload.packet.packet_id))
-
- else:
- deletes.append((packet_id,))
- msg = message.payload.packet.load_message()
- redoes.append((msg.packet_id,))
- if msg.undo_callback:
- try:
- # try to redo the message... this may not always be possible now...
- msg.undo_callback([(msg.authentication.member, msg.distribution.global_time, msg)], redo=True)
- except Exception as exception:
- self._logger.exception("%s", exception)
-
- progress += 1
- for handler in progress_handlers:
- handler.Update(progress)
+ u"""DROP TABLE meta_message;""",
- for handler in progress_handlers:
- handler.Update(progress, "Saving the results...")
+ u"""ALTER TABLE meta_message_new RENAME TO meta_message;""",
- # note: UPDATE first, REDOES second, since UPDATES contains undo items that may have
- # been invalid
- self.executemany(u"UPDATE sync SET undone = ? WHERE id = ?", updates)
- self.executemany(u"UPDATE sync SET undone = 0 WHERE id = ?", redoes)
- self.executemany(u"DELETE FROM sync WHERE id = ?", deletes)
+ u"""UPDATE option SET value = '21' WHERE key = 'database_version';"""
+ ])
+ self._logger.debug("upgrade database %d -> %d (done)", database_version, 21)
- self.execute(u"UPDATE community SET database_version = 8 WHERE id = ?", (community.database_id,))
- self.commit()
+ returnValue(LATEST_VERSION)
- for handler in progress_handlers:
- handler.Destroy()
+ @inlineCallbacks
+ def check_community_database(self, community, database_version):
+ assert isinstance(database_version, int)
+ assert database_version >= 0
if database_version < 21:
self._logger.debug("upgrade community %d -> %d", database_version, 20)
@@ -629,14 +327,14 @@ def check_community_database(self, community, database_version):
# all meta messages that use sequence numbers
metas = [meta for meta in community.get_meta_messages() if (
- isinstance(meta.distribution, FullSyncDistribution) and meta.distribution.enable_sequence_number)]
- convert_packet_to_message = community.dispersy.convert_packet_to_message
+ isinstance(meta.distribution, FullSyncDistribution) and meta.distribution.enable_sequence_number)]
+ convert_packet_to_message = yield community.dispersy.convert_packet_to_message
progress = 0
count = 0
deletes = []
for meta in metas:
- i, = next(self.execute(u"SELECT COUNT(*) FROM sync WHERE meta_message = ?", (meta.database_id,)))
+ i, = yield self.stormdb.fetchone(u"SELECT COUNT(*) FROM sync WHERE meta_message = ?", (meta.database_id,))
count += i
self._logger.debug("checking %d sequence number enabled messages [%s]", count, community.cid.encode("HEX"))
if count > 50:
@@ -647,8 +345,8 @@ def check_community_database(self, community, database_version):
sequence_updates = []
for meta in metas:
- rows = list(self.execute(u"SELECT id, member, packet FROM sync "
- u"WHERE meta_message = ? ORDER BY member, global_time", (meta.database_id,)))
+ rows = yield self.stormdb.fetchall(u"SELECT id, member, packet FROM sync "
+ u"WHERE meta_message = ? ORDER BY member, global_time", (meta.database_id,))
groups = groupby(rows, key=lambda tup: tup[1])
for member_id, iterator in groups:
last_global_time = 0
@@ -658,7 +356,7 @@ def check_community_database(self, community, database_version):
if message:
assert message.authentication.member.database_id == member_id
if (last_sequence_number + 1 == message.distribution.sequence_number and
- last_global_time < message.distribution.global_time):
+ last_global_time < message.distribution.global_time):
# message is OK
sequence_updates.append((message.distribution.sequence_number, packet_id))
last_sequence_number += 1
@@ -681,25 +379,24 @@ def check_community_database(self, community, database_version):
self._logger.debug("will delete %d packets from the database", len(deletes))
if deletes:
- self.executemany(u"DELETE FROM sync WHERE id = ?", deletes)
+ yield self.stormdb.executemany(u"DELETE FROM sync WHERE id = ?", deletes)
if sequence_updates:
- self.executemany(u"UPDATE sync SET sequence = ? WHERE id = ?", sequence_updates)
+ yield self.stormdb.executemany(u"UPDATE sync SET sequence = ? WHERE id = ?", sequence_updates)
# we may have removed some undo-other or undo-own messages. we must ensure that there
# are no messages in the database that point to these removed messages
- updates = list(self.execute(u"""
+ updates = yield self.stormdb.fetchall(u"""
SELECT a.id
FROM sync a
LEFT JOIN sync b ON a.undone = b.id
- WHERE a.community = ? AND a.undone > 0 AND b.id is NULL""", (community.database_id,)))
+ WHERE a.community = ? AND a.undone > 0 AND b.id IS NULL""", (community.database_id,))
if updates:
- self.executemany(u"UPDATE sync SET undone = 0 WHERE id = ?", updates)
+ yield self.stormdb.executemany(u"UPDATE sync SET undone = 0 WHERE id = ?", updates)
- self.execute(u"UPDATE community SET database_version = 21 WHERE id = ?", (community.database_id,))
- self.commit()
+ yield self.stormdb.execute(u"UPDATE community SET database_version = 21 WHERE id = ?", (community.database_id,))
for handler in progress_handlers:
handler.Destroy()
- return LATEST_VERSION
+ returnValue(LATEST_VERSION)
diff --git a/endpoint.py b/endpoint.py
index 03d50267..16565e3a 100644
--- a/endpoint.py
+++ b/endpoint.py
@@ -9,6 +9,8 @@
from time import time
from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python.threadable import isInIOThread
from .candidate import Candidate
@@ -50,9 +52,10 @@ def close(self, timeout=0.0):
assert isinstance(timeout, float), type(timeout)
return True
+ @inlineCallbacks
def log_packet(self, sock_addr, packet, outbound=True):
try:
- community = self._dispersy.get_community(packet[2:22], load=False, auto_load=False)
+ community = yield self._dispersy.get_community(packet[2:22], load=False, auto_load=False)
# find associated conversion
conversion = community.get_conversion_for_packet(packet)
@@ -171,6 +174,7 @@ def close(self, timeout=10.0):
return super(StandaloneEndpoint, self).close(timeout) and result
+ @inlineCallbacks
def _loop(self):
assert self._dispersy, "Should not be called before open(...)"
recvfrom = self._socket.recvfrom
@@ -187,7 +191,7 @@ def _loop(self):
# Furthermore, if we are allowed to send, process sendqueue immediately
if write_list:
- self._process_sendqueue()
+ yield self._process_sendqueue()
prev_sendqueue = time()
if read_list:
@@ -207,8 +211,9 @@ def _loop(self):
finally:
if packets:
self._logger.debug('%d came in, %d bytes in total', len(packets), sum(len(packet) for _, packet in packets))
- self.data_came_in(packets)
+ yield self.data_came_in(packets)
+ @inlineCallbacks
def data_came_in(self, packets, cache=True):
assert self._dispersy, "Should not be called before open(...)"
assert isinstance(packets, (list, tuple)), type(packets)
@@ -219,19 +224,23 @@ def data_came_in(self, packets, cache=True):
packet[1].startswith(p)), None)
if prefix:
sock_addr, data = packet
- self.packet_handlers[prefix](sock_addr, data[len(prefix):])
+ yield self.packet_handlers[prefix](sock_addr, data[len(prefix):])
else:
+ sock_addr, _ = packet
+ if sock_addr[0] == "127.0.0.1":
+ continue
normal_packets.append(packet)
if normal_packets:
self._dispersy.statistics.total_down += sum(len(data) for _, data in normal_packets)
if self._logger.isEnabledFor(logging.DEBUG):
for sock_addr, data in normal_packets:
- self.log_packet(sock_addr, data, outbound=False)
+ yield self.log_packet(sock_addr, data, outbound=False)
- # The endpoint runs on it's own thread, so we can't do a callLater here
+ # The endpoint runs on its own thread, so we can't do a callLater here
reactor.callFromThread(self.dispersythread_data_came_in, normal_packets, time(), cache)
+ @inlineCallbacks
def dispersythread_data_came_in(self, packets, timestamp, cache=True):
assert self._dispersy, "Should not be called before open(...)"
@@ -242,13 +251,14 @@ def strip_if_tunnel(packets):
else:
yield False, sock_addr, data
- self._dispersy.on_incoming_packets([(Candidate(sock_addr, tunnel), data)
+ yield self._dispersy.on_incoming_packets([(Candidate(sock_addr, tunnel), data)
for tunnel, sock_addr, data
in strip_if_tunnel(packets)],
cache,
timestamp,
u"standalone_ep")
+ @inlineCallbacks
def send(self, candidates, packets, prefix=None):
assert self._dispersy, "Should not be called before open(...)"
assert isinstance(candidates, (tuple, list, set)), type(candidates)
@@ -265,11 +275,13 @@ def send(self, candidates, packets, prefix=None):
send_packet = False
for candidate, packet in product(candidates, packets):
- if self.send_packet(candidate, packet):
+ send_packet_result = yield self.send_packet(candidate, packet)
+ if send_packet_result:
send_packet = True
- return send_packet
+ returnValue(send_packet)
+ @inlineCallbacks
def send_packet(self, candidate, packet, prefix=None):
assert self._dispersy, "Should not be called before open(...)"
assert isinstance(candidate, Candidate), type(candidate)
@@ -290,7 +302,7 @@ def send_packet(self, candidate, packet, prefix=None):
self._socket.sendto(data, candidate.sock_addr)
if self._logger.isEnabledFor(logging.DEBUG):
- self.log_packet(candidate.sock_addr, packet)
+ yield self.log_packet(candidate.sock_addr, packet)
except socket.error:
with self._sendqueue_lock:
@@ -299,10 +311,11 @@ def send_packet(self, candidate, packet, prefix=None):
# If we did not have a sendqueue, then we need to call process_sendqueue in order send these messages
if not did_have_senqueue:
- self._process_sendqueue()
+ yield self._process_sendqueue()
- return True
+ returnValue(True)
+ @inlineCallbacks
def _process_sendqueue(self):
assert self._dispersy, "Should not be called before start(...)"
with self._sendqueue_lock:
@@ -322,7 +335,7 @@ def _process_sendqueue(self):
index += 1
if self._logger.isEnabledFor(logging.DEBUG):
- self.log_packet(sock_addr, data)
+ yield self.log_packet(sock_addr, data)
except socket.error as e:
if e[0] != SOCKET_BLOCK_ERRORCODE:
@@ -366,11 +379,13 @@ def clear_receive_queue(self):
len(packets), sum(len(packet) for _, packet in packets))
return packets
+ @inlineCallbacks
def process_receive_queue(self):
packets = self.clear_receive_queue()
- self.process_packets(packets)
- return packets
+ yield self.process_packets(packets)
+ returnValue(packets)
+ @inlineCallbacks
def process_packets(self, packets, cache=True):
self._logger.debug('processing %d packets', len(packets))
- StandaloneEndpoint.data_came_in(self, packets, cache=cache)
+ yield StandaloneEndpoint.data_came_in(self, packets, cache=cache)
diff --git a/message.py b/message.py
index 1c9091c4..eeed8dc3 100644
--- a/message.py
+++ b/message.py
@@ -2,6 +2,9 @@
from abc import ABCMeta, abstractmethod, abstractproperty
from time import time
+from twisted.internet.defer import inlineCallbacks
+from twisted.internet.defer import returnValue
+
from .authentication import Authentication
from .candidate import Candidate, LoopbackCandidate
from .destination import Destination
@@ -82,9 +85,11 @@ def __init__(self, community, missing_member_id):
def match_info(self):
return (self._cid, u"dispersy-identity", self._missing_member_id, None, []),
+ @inlineCallbacks
def send_request(self, community, candidate):
- return community.create_missing_identity(candidate,
- community.dispersy.get_member(mid=self._missing_member_id))
+ dispersy_member = yield community.dispersy.get_member(mid=self._missing_member_id)
+ missing_identity = yield community.create_missing_identity(candidate, dispersy_member)
+ returnValue(missing_identity)
class DelayPacketByMissingMessage(DelayPacket):
@@ -100,8 +105,10 @@ def __init__(self, community, member, global_time):
def match_info(self):
return (self._cid, None, self._member.mid, self._global_time, []),
+ @inlineCallbacks
def send_request(self, community, candidate):
- return community.create_missing_message(candidate, self._member, self._global_time)
+ missing_message = yield community.create_missing_message(candidate, self._member, self._global_time)
+ returnValue(missing_message)
class DropPacket(Exception):
@@ -140,8 +147,9 @@ def match_info(self):
def resume_immediately(self):
return True
+ @inlineCallbacks
def send_request(self, community, candidate):
- community.create_missing_proof(candidate, self._delayed)
+ yield community.create_missing_proof(candidate, self._delayed)
class DelayMessageBySequence(DelayMessage):
@@ -161,8 +169,9 @@ def duplicate(self, delayed):
def match_info(self):
return (self._cid, None, self._delayed.authentication.member.mid, None, range(self._missing_low, self._missing_high + 1)),
+ @inlineCallbacks
def send_request(self, community, candidate):
- community.create_missing_sequence(candidate, self._delayed.authentication.member,
+ yield community.create_missing_sequence(candidate, self._delayed.authentication.member,
self._delayed.meta, self._missing_low, self._missing_high)
@@ -182,8 +191,9 @@ def duplicate(self, delayed):
def match_info(self):
return (self._cid, None, self._member.mid, self._global_time, []),
+ @inlineCallbacks
def send_request(self, community, candidate):
- community.create_missing_message(candidate, self._member, self._global_time)
+ yield community.create_missing_message(candidate, self._member, self._global_time)
class DropMessage(Exception):
@@ -302,10 +312,11 @@ def packet_id(self, packet_id):
assert isinstance(packet_id, (int, long))
self._packet_id = packet_id
+ @inlineCallbacks
def load_message(self):
- message = self._meta.community.dispersy.convert_packet_to_message(self._packet, self._meta.community, verify=False)
+ message = yield self._meta.community.dispersy.convert_packet_to_message(self._packet, self._meta.community, verify=False)
message.packet_id = self._packet_id
- return message
+ returnValue(message)
def __str__(self):
return "<%s.%s %s %dbytes>" % (self._meta.__class__.__name__, self.__class__.__name__, self._meta._name, len(self._packet))
@@ -318,7 +329,7 @@ class Message(MetaObject):
class Implementation(Packet):
- def __init__(self, meta, authentication, resolution, distribution, destination, payload, conversion=None, candidate=None, source=u"unknown", packet="", packet_id=0, sign=True):
+ def __init__(self, meta, authentication, resolution, distribution, destination, payload, conversion=None, candidate=None, source=u"unknown", packet="", packet_id=0):
from .conversion import Conversion
assert isinstance(meta, Message), "META has invalid type '%s'" % type(meta)
assert isinstance(authentication, meta.authentication.Implementation), "AUTHENTICATION has invalid type '%s'" % type(authentication)
@@ -358,16 +369,24 @@ def __init__(self, meta, authentication, resolution, distribution, destination,
else:
self._conversion = meta.community.get_conversion_for_message(self)
- if not packet:
- self._packet = self._conversion.encode_message(self, sign=sign)
-
- if __debug__: # attempt to decode the message when running in debug
- try:
- self._conversion.decode_message(LoopbackCandidate(), self._packet, verify=sign, allow_empty_signature=True)
- except DropPacket:
- from binascii import hexlify
- self._logger.error("Could not decode message created by me, hex '%s'", hexlify(self._packet))
- raise
+ @inlineCallbacks
+ def initialize_packet(self, sign):
+ """
+ Must be called if packet was None in the constructor.
+ Args:
+ sign: The verify sign for the packet.
+
+ """
+ self._packet = yield self._conversion.encode_message(self, sign=sign)
+
+ if __debug__: # attempt to decode the message when running in debug
+ try:
+ yield self._conversion.decode_message(LoopbackCandidate(), self._packet, verify=sign,
+ allow_empty_signature=True)
+ except DropPacket:
+ from binascii import hexlify
+ self._logger.error("Could not decode message created by me, hex '%s'", hexlify(self._packet))
+ raise
@property
def conversion(self):
@@ -413,11 +432,12 @@ def resume(self, message):
def load_message(self):
return self
+ @inlineCallbacks
def regenerate_packet(self, packet=""):
if packet:
self._packet = packet
else:
- self._packet = self._conversion.encode_message(self)
+ self._packet = yield self._conversion.encode_message(self)
def __str__(self):
return "<%s.%s %s>" % (self._meta.__class__.__name__, self.__class__.__name__, self._meta._name)
@@ -510,6 +530,7 @@ def undo_callback(self):
def batch(self):
return self._batch
+ @inlineCallbacks
def impl(self, authentication=(), resolution=(), distribution=(), destination=(), payload=(), *args, **kargs):
assert isinstance(authentication, tuple), type(authentication)
assert isinstance(resolution, tuple), type(resolution)
@@ -522,8 +543,13 @@ def impl(self, authentication=(), resolution=(), distribution=(), destination=()
distribution_impl = self._distribution.Implementation(self._distribution, *distribution)
destination_impl = self._destination.Implementation(self._destination, *destination)
payload_impl = self._payload.Implementation(self._payload, *payload)
- return self.Implementation(self, authentication_impl, resolution_impl, distribution_impl, destination_impl, payload_impl, *args, **kargs)
+ impl = self.Implementation(self, authentication_impl, resolution_impl, distribution_impl, destination_impl, payload_impl, *args, **kargs)
+ packet = kargs.get("packet", "")
+ if not packet:
+ sign = kargs["sign"] if "sign" in kargs else True
+ yield impl.initialize_packet(sign)
+ returnValue(impl)
except (TypeError, DropPacket):
self._logger.error("message name: %s", self._name)
self._logger.error("authentication: %s.Implementation", self._authentication.__class__.__name__)
diff --git a/requestcache.py b/requestcache.py
index 4a9846eb..a31c19d3 100644
--- a/requestcache.py
+++ b/requestcache.py
@@ -2,6 +2,7 @@
import logging
from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks
from twisted.python.threadable import isInIOThread
from .taskmanager import TaskManager
@@ -89,7 +90,7 @@ class IntroductionRequestCache(RandomNumberCache):
@property
def timeout_delay(self):
# we will accept the response at most 10.5 seconds after our request
- return 10.5
+ return 100.5
def __init__(self, community, helper_candidate):
super(IntroductionRequestCache, self).__init__(community.request_cache, u"introduction-request")
@@ -198,6 +199,7 @@ def pop(self, prefix, number):
self.cancel_pending_task(cache)
return cache
+ @inlineCallbacks
def _on_timeout(self, cache):
"""
Called CACHE.timeout_delay seconds after CACHE was added to this RequestCache.
@@ -210,7 +212,7 @@ def _on_timeout(self, cache):
assert isinstance(cache, NumberCache), type(cache)
self._logger.debug("timeout on %s", cache)
- cache.on_timeout()
+ yield cache.on_timeout()
# the on_timeout call could have already removed the identifier from the cache using pop
identifier = self._create_identifier(cache.number, cache.prefix)
diff --git a/statistics.py b/statistics.py
index 9a948f84..981ad8ce 100644
--- a/statistics.py
+++ b/statistics.py
@@ -3,6 +3,8 @@
from threading import RLock
from time import time
+from twisted.internet.defer import inlineCallbacks
+
class Statistics(object):
@@ -195,11 +197,13 @@ def __init__(self, dispersy):
self.msg_statistics = MessageStatistics()
self.enable_debug_statistics(__debug__)
- self.update()
+ @inlineCallbacks
+ def initialize(self):
+ yield self.update()
@property
def database_version(self):
- return self._dispersy.database.database_version
+ return self._dispersy.database.stormdb.version
@property
def lan_address(self):
@@ -236,12 +240,13 @@ def enable_debug_statistics(self, enable):
def are_debug_statistics_enabled(self):
return self._enabled
+ @inlineCallbacks
def update(self, database=False):
self.timestamp = time()
self.communities = [community.statistics for community in self._dispersy.get_communities()]
for community in self.communities:
- community.update(database=database)
+ yield community.update(database=database)
# list with {count=int, duration=float, average=float, entry=str} dictionaries. each entry
# represents a key from the attach_runtime_statistics decorator
@@ -348,9 +353,11 @@ def candidates(self):
def enable_debug_statistics(self, enabled):
self.msg_statistics.enable(enabled)
+ @inlineCallbacks
def update(self, database=False):
if database:
- self.database = dict(self._community.dispersy.database.execute(u"SELECT meta_message.name, COUNT(sync.id) FROM sync JOIN meta_message ON meta_message.id = sync.meta_message WHERE sync.community = ? GROUP BY sync.meta_message", (self._community.database_id,)))
+ sync_data = yield self._community.dispersy.database.stormdb.fetchall(u"SELECT meta_message.name, COUNT(sync.id) FROM sync JOIN meta_message ON meta_message.id = sync.meta_message WHERE sync.community = ? GROUP BY sync.meta_message", (self._community.database_id,))
+ self.database = dict(sync_data)
else:
self.database = dict()
diff --git a/tests/data/dispersy_v1.db b/tests/data/dispersy_v1.db
new file mode 100644
index 00000000..88364279
Binary files /dev/null and b/tests/data/dispersy_v1.db differ
diff --git a/tests/data/dispersy_v1337.db b/tests/data/dispersy_v1337.db
new file mode 100644
index 00000000..bf83b044
Binary files /dev/null and b/tests/data/dispersy_v1337.db differ
diff --git a/tests/data/dispersy_v16.db b/tests/data/dispersy_v16.db
new file mode 100644
index 00000000..8c0a98b3
Binary files /dev/null and b/tests/data/dispersy_v16.db differ
diff --git a/tests/debugcommunity/community.py b/tests/debugcommunity/community.py
index d2b910f2..08ad874c 100644
--- a/tests/debugcommunity/community.py
+++ b/tests/debugcommunity/community.py
@@ -1,3 +1,5 @@
+from twisted.internet.defer import inlineCallbacks, returnValue
+
from ...authentication import DoubleMemberAuthentication, MemberAuthentication
from ...candidate import Candidate
from ...community import Community, HardKilledCommunity
@@ -193,6 +195,7 @@ def initiate_meta_messages(self):
#
# double-signed-text
#
+ @inlineCallbacks
def allow_double_signed_text(self, message):
"""
Received a request to sign MESSAGE.
@@ -203,19 +206,21 @@ def allow_double_signed_text(self, message):
allow_text = message.payload.text
assert allow_text.startswith("Allow=True") or allow_text.startswith("Allow=False") or allow_text.startswith("Allow=Modify") or allow_text.startswith("Allow=Append")
if allow_text.startswith("Allow=True"):
- return message
+ returnValue(message)
if allow_text.startswith("Allow=Modify"):
meta = message.meta
- return meta.impl(authentication=(message.authentication.members,),
+ res = yield meta.impl(authentication=(message.authentication.members,),
distribution=(message.distribution.global_time,),
payload=("MODIFIED",))
+ returnValue(res)
if allow_text.startswith("Allow=Append"):
meta = message.meta
- return meta.impl(authentication=(message.authentication.members, message.authentication._signatures),
+ res = yield meta.impl(authentication=(message.authentication.members, message.authentication._signatures),
distribution=(message.distribution.global_time,),
payload=(allow_text + "MODIFIED",))
+ returnValue(res)
def split_double_payload(self, payload):
# alice signs until the ","
@@ -232,12 +237,13 @@ def on_text(self, messages):
if not "Dprint=False" in message.payload.text:
self._logger.debug("%s \"%s\" @%d", message, message.payload.text, message.distribution.global_time)
+ @inlineCallbacks
def undo_text(self, descriptors):
"""
Received an undo for a text message.
"""
for member, global_time, packet in descriptors:
- message = packet.load_message()
+ message = yield packet.load_message()
self._logger.debug("undo \"%s\" @%d", message.payload.text, global_time)
def dispersy_cleanup_community(self, message):
diff --git a/tests/debugcommunity/node.py b/tests/debugcommunity/node.py
index 03702af2..855212b7 100644
--- a/tests/debugcommunity/node.py
+++ b/tests/debugcommunity/node.py
@@ -1,12 +1,13 @@
import sys
-from time import time, sleep
+from time import time
import logging
from twisted.internet import reactor
-from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.defer import inlineCallbacks, returnValue, maybeDeferred
from twisted.internet.task import deferLater
from twisted.python.threadable import isInIOThread
+from ...taskmanager import TaskManager
from ...bloomfilter import BloomFilter
from ...candidate import Candidate
from ...endpoint import TUNNEL_PREFIX
@@ -18,7 +19,7 @@
from ...util import blocking_call_on_reactor_thread, blockingCallFromThread
-class DebugNode(object):
+class DebugNode(TaskManager):
"""
DebugNode is used to represent an external node/peer while performing unittests.
@@ -30,24 +31,31 @@ class DebugNode(object):
node.init_my_member()
"""
- def __init__(self, testclass, dispersy, communityclass=DebugCommunity, c_master_member=None, curve=u"low"):
+ def __init__(self, testclass, dispersy):
super(DebugNode, self).__init__()
self._logger = logging.getLogger(self.__class__.__name__)
self._testclass = testclass
self._dispersy = dispersy
- self._my_member = self._dispersy.get_new_member(curve)
- self._my_pub_member = Member(self._dispersy, self._my_member._ec.pub(), self._my_member.database_id)
+ self._my_member = None
+ self._my_pub_member = None
+ self._central_node = None
+ self._community = None
+ self._tunnel = False
+ self._connection_type = u"unknown"
+
+ @inlineCallbacks
+ def initialize(self, communityclass=DebugCommunity, c_master_member=None, curve=u"low"):
+ self._my_member = yield self._dispersy.get_new_member(curve)
+ self._my_pub_member = Member(self._dispersy, self._my_member._ec.pub(), self._my_member.database_id)
if c_master_member == None:
- self._community = communityclass.create_community(self._dispersy, self._my_member)
+ self._community = yield communityclass.create_community(self._dispersy, self._my_member)
else:
- mm = self._dispersy.get_member(mid=c_master_member._community._master_member.mid)
- self._community = communityclass.init_community(self._dispersy, mm, self._my_member)
+ mm = yield self._dispersy.get_member(mid=c_master_member._community._master_member.mid)
+ self._community = yield communityclass.init_community(self._dispersy, mm, self._my_member)
self._central_node = c_master_member
- self._tunnel = False
- self._connection_type = u"unknown"
@property
def community(self):
@@ -119,14 +127,14 @@ def init_my_member(self, tunnel=False, store_identity=True):
"""
self._tunnel = tunnel
if self._central_node:
- self.send_identity(self._central_node)
+ yield self.send_identity(self._central_node)
# download mm identity, mm authorizing central_node._my_member
- packets = self._central_node.fetch_packets([u"dispersy-identity", u"dispersy-authorize"], self._community.master_member.mid)
- self.give_packets(packets, self._central_node)
+ packets = yield self._central_node.fetch_packets([u"dispersy-identity", u"dispersy-authorize"], self._community.master_member.mid)
+ yield self.give_packets(packets, self._central_node)
# add this node to candidate list of mm
- message = self.create_introduction_request(self._central_node.my_candidate, self.lan_address, self.wan_address, False, u"unknown", None, 1, 1)
+ message = yield self.create_introduction_request(self._central_node.my_candidate, self.lan_address, self.wan_address, False, u"unknown", None, 1, 1)
yield self._central_node.give_message(message, self)
# remove introduction responses from socket
@@ -134,16 +142,21 @@ def init_my_member(self, tunnel=False, store_identity=True):
assert len(messages), "No introduction messages received!"
+ @inlineCallbacks
def encode_message(self, message):
"""
Returns the raw packet after MESSAGE is encoded using the associated community.
"""
assert isinstance(message, Message.Implementation)
- return self._community.get_conversion_for_message(message).encode_message(message)
+ conversion = self._community.get_conversion_for_message(message)
+ res = yield conversion.encode_message(message)
+ returnValue(res)
+ @inlineCallbacks
def give_packet(self, packet, source, cache=False):
- self.give_packets([packet], source, cache=cache)
+ yield self.give_packets([packet], source, cache=cache)
+ @inlineCallbacks
def give_packets(self, packets, source, cache=False):
"""
Give multiple PACKETS directly to Dispersy on_incoming_packets.
@@ -155,11 +168,13 @@ def give_packets(self, packets, source, cache=False):
assert isinstance(cache, bool), type(cache)
self._logger.debug("%s giving %d bytes", self.my_candidate, sum(len(packet) for packet in packets))
- self._dispersy.endpoint.process_packets([(source.lan_address, TUNNEL_PREFIX + packet if source.tunnel else packet) for packet in packets], cache=cache)
+ yield self._dispersy.endpoint.dispersythread_data_came_in([(source.lan_address, TUNNEL_PREFIX + packet if source.tunnel else packet) for packet in packets], time(), cache=cache)
+ @inlineCallbacks
def give_message(self, message, source, cache=False):
- self.give_messages([message], source, cache=cache)
+ yield self.give_messages([message], source, cache=cache)
+ @inlineCallbacks
def give_messages(self, messages, source, cache=False):
"""
Give multiple MESSAGES directly to Dispersy on_incoming_packets after they are encoded.
@@ -169,11 +184,19 @@ def give_messages(self, messages, source, cache=False):
assert all(isinstance(message, Message.Implementation) for message in messages), [type(message) for message in messages]
assert isinstance(cache, bool), type(cache)
- packets = [message.packet if message.packet else self.encode_message(message) for message in messages]
+ packets = []
+ for message in messages:
+ if message.packet:
+ packets.append(message.packet)
+ else:
+ m = yield self.encode_message(message)
+ packets.append(m)
+
self._logger.debug("%s giving %d messages (%d bytes)",
self.my_candidate, len(messages), sum(len(packet) for packet in packets))
- self.give_packets(packets, source, cache=cache)
+ yield self.give_packets(packets, source, cache=cache)
+ @inlineCallbacks
def send_packet(self, packet, candidate):
"""
Sends PACKET to ADDRESS using the nodes' socket.
@@ -182,8 +205,10 @@ def send_packet(self, packet, candidate):
assert isinstance(packet, str)
assert isinstance(candidate, Candidate)
self._logger.debug("%d bytes to %s", len(packet), candidate)
- return self._dispersy.endpoint.send([candidate], [packet])
+ send_result = yield self._dispersy.endpoint.send([candidate], [packet])
+ returnValue(send_result)
+ @inlineCallbacks
def send_message(self, message, candidate):
"""
Sends MESSAGE to ADDRESS using the nodes' socket after it is encoded.
@@ -193,21 +218,23 @@ def send_message(self, message, candidate):
assert isinstance(candidate, Candidate)
self._logger.debug("%s to %s", message.name, candidate)
- self.encode_message(message)
+ yield self.encode_message(message)
- return self.send_packet(message.packet, candidate)
+ res = yield self.send_packet(message.packet, candidate)
+ returnValue(res)
+ @inlineCallbacks
def process_packets(self, timeout=1.0):
"""
Process all packets on the nodes' socket.
"""
timeout = time() + timeout
while timeout > time():
- packets = self._dispersy.endpoint.process_receive_queue()
+ packets = yield self._dispersy.endpoint.process_receive_queue()
if packets:
- return packets
+ returnValue(packets)
else:
- sleep(0.1)
+ yield deferLater(reactor, 0.1, lambda: None)
def drop_packets(self):
"""
@@ -216,6 +243,7 @@ def drop_packets(self):
for address, packet in self._dispersy.endpoint.clear_receive_queue():
self._logger.debug("dropped %d bytes from %s:%d", len(packet), address[0], address[1])
+ @inlineCallbacks
def receive_packet(self, addresses=None, timeout=0.5):
"""
Returns the first matching (candidate, packet) tuple from incoming UDP packets.
@@ -228,6 +256,7 @@ def receive_packet(self, addresses=None, timeout=0.5):
assert isinstance(timeout, (int, float)), type(timeout)
timeout = time() + timeout
+ return_list = []
while timeout > time():
packets = self._dispersy.endpoint.clear_receive_queue()
if packets:
@@ -244,13 +273,17 @@ def receive_packet(self, addresses=None, timeout=0.5):
candidate = Candidate(address, tunnel)
self._logger.debug("%d bytes from %s", len(packet), candidate)
- yield candidate, packet
+ return_list.append((candidate, packet))
+ returnValue(iter(return_list))
else:
- sleep(0.001)
+ yield deferLater(reactor, 0.001, lambda: None)
+ @inlineCallbacks
def receive_packets(self, addresses=None, timeout=0.5):
- return list(self.receive_packet(addresses, timeout))
+ packets = yield self.receive_packet(addresses, timeout)
+ returnValue(list(packets))
+ @inlineCallbacks
def receive_message(self, addresses=None, names=None, timeout=0.5):
"""
Returns the first matching (candidate, message) tuple from incoming UDP packets.
@@ -266,148 +299,201 @@ def receive_message(self, addresses=None, names=None, timeout=0.5):
assert names is None or isinstance(names, list), type(names)
assert names is None or all(isinstance(name, unicode) for name in names), [type(name) for name in names]
- for candidate, packet in self.receive_packet(addresses, timeout):
- try:
- message = self.decode_message(candidate, packet)
- except ConversionNotFoundException as exception:
- self._logger.exception("Ignored %s", exception)
- continue
+ packets = yield self.receive_packet(addresses, timeout)
+ return_list = []
+ if packets:
+ for candidate, packet in packets:
+ try:
+ message = yield self.decode_message(candidate, packet)
+ except ConversionNotFoundException as exception:
+ self._logger.exception("Ignored %s", exception)
+ continue
- if not (names is None or message.name in names):
- self._logger.debug("Ignored %s (%d bytes) from %s", message.name, len(packet), candidate)
- continue
+ if not (names is None or message.name in names):
+ self._logger.debug("Ignored %s (%d bytes) from %s", message.name, len(packet), candidate)
+ continue
- self._logger.debug("%s (%d bytes) from %s", message.name, len(packet), candidate)
- yield candidate, message
+ self._logger.debug("%s (%d bytes) from %s", message.name, len(packet), candidate)
+ return_list.append((candidate, message))
+ returnValue(iter(return_list))
@blocking_call_on_reactor_thread
@inlineCallbacks
def receive_messages(self, addresses=None, names=None, return_after=sys.maxint, timeout=0.5):
messages = []
for _ in xrange(5):
- for message_tuple in self.receive_message(addresses, names, timeout):
- messages.append(message_tuple)
- if len(messages) == return_after:
+ received_messages = yield self.receive_message(addresses, names, timeout)
+ if received_messages:
+ for message_tuple in received_messages:
+ messages.append(message_tuple)
+ if len(messages) == return_after:
+ break
+ if messages:
break
- if messages:
- break
else:
# Wait for a bit and try again
- yield deferLater(reactor, 0.005, lambda : None)
+ yield self.register_task("receive_messages_wait", deferLater(reactor, 0.005, lambda: None))
returnValue(messages)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def decode_message(self, candidate, packet):
- return self._community.get_conversion_for_packet(packet).decode_message(candidate, packet)
+ conversion_for_packet = self._community.get_conversion_for_packet(packet)
+ decoded_message = yield conversion_for_packet.decode_message(candidate, packet)
+ returnValue(decoded_message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def fetch_packets(self, message_names, mid=None):
if mid:
- return [str(packet) for packet, in list(self._dispersy.database.execute(u"SELECT packet FROM sync, member WHERE sync.member = member.id "
+ packets = yield self._dispersy.database.stormdb.fetchall(u"SELECT packet FROM sync, member WHERE sync.member = member.id "
u"AND mid = ? AND meta_message IN (" + ", ".join("?" * len(message_names)) + ") ORDER BY global_time, packet",
- [buffer(mid), ] + [self._community.get_meta_message(name).database_id for name in message_names]))]
- return [str(packet) for packet, in list(self._dispersy.database.execute(u"SELECT packet FROM sync WHERE meta_message IN (" + ", ".join("?" * len(message_names)) + ") ORDER BY global_time, packet",
- [self._community.get_meta_message(name).database_id for name in message_names]))]
+ [buffer(mid), ] + [self._community.get_meta_message(name).database_id for name in message_names])
+ returnValue([str(packet) for packet, in packets])
+ packets = yield self._dispersy.database.stormdb.fetchall(u"SELECT packet FROM sync WHERE meta_message IN (" + ", ".join("?" * len(message_names)) + ") ORDER BY global_time, packet",
+ [self._community.get_meta_message(name).database_id for name in message_names])
+ returnValue([str(packet) for packet, in packets])
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def fetch_messages(self, message_names, mid=None):
"""
Fetch all packets for MESSAGE_NAMES from the database and converts them into
Message.Implementation instances.
"""
- return self._dispersy.convert_packets_to_messages(self.fetch_packets(message_names, mid), community=self._community, verify=False)
+ packets = yield self.fetch_packets(message_names, mid)
+ res = yield self._dispersy.convert_packets_to_messages(packets, community=self._community, verify=False)
+ returnValue(res)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def count_messages(self, message):
- packets_stored, = self._dispersy.database.execute(u"SELECT count(*) FROM sync, member, meta_message WHERE sync.member = member.id AND sync.meta_message = meta_message.id AND sync.community = ? AND mid = ? AND name = ?", (self._community.database_id, buffer(message.authentication.member.mid), message.name)).next()
- return packets_stored
+ packets_stored, = yield self._dispersy.database.stormdb.fetchone(
+ u"""
+ SELECT count(*)
+ FROM sync, member, meta_message
+ WHERE sync.member = member.id AND sync.meta_message = meta_message.id AND sync.community = ?
+ AND mid = ? AND name = ?
+ """, (self._community.database_id, buffer(message.authentication.member.mid), message.name))
+ returnValue(packets_stored)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def assert_is_stored(self, message=None, messages=None):
if messages == None:
messages = [message]
for message in messages:
try:
- undone, packet = self._dispersy.database.execute(u"SELECT undone, packet FROM sync, member WHERE sync.member = member.id AND community = ? AND mid = ? AND global_time = ?",
- (self._community.database_id, buffer(message.authentication.member.mid), message.distribution.global_time)).next()
+ undone, packet = yield self._dispersy.database.stormdb.fetchone(
+ u"""
+ SELECT undone, packet
+ FROM sync, member
+ WHERE sync.member = member.id AND community = ? AND mid = ? AND global_time = ?
+ """,
+ (self._community.database_id, buffer(message.authentication.member.mid),
+ message.distribution.global_time))
self._testclass.assertEqual(undone, 0, "Message is undone")
self._testclass.assertEqual(str(packet), message.packet)
- except StopIteration:
+ except TypeError:
self._testclass.fail("Message is not stored")
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def assert_not_stored(self, message=None, messages=None):
if messages == None:
messages = [message]
for message in messages:
try:
- packet, = self._dispersy.database.execute(u"SELECT packet FROM sync, member WHERE sync.member = member.id AND community = ? AND mid = ? AND global_time = ?",
- (self._community.database_id, buffer(message.authentication.member.mid), message.distribution.global_time)).next()
+ packet, = yield self._dispersy.database.stormdb.fetchone(
+ u"""
+ SELECT packet
+ FROM sync, member
+ WHERE sync.member = member.id AND community = ? AND mid = ? AND global_time = ?
+ """,
+ (self._community.database_id, buffer(message.authentication.member.mid),
+ message.distribution.global_time))
self._testclass.assertNotEqual(str(packet), message.packet)
- except StopIteration:
+ except TypeError:
pass
- assert_is_done = assert_is_stored
-
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def assert_is_undone(self, message=None, messages=None, undone_by=None):
if messages == None:
messages = [message]
for message in messages:
try:
- undone, = self._dispersy.database.execute(u"SELECT undone FROM sync, member WHERE sync.member = member.id AND community = ? AND mid = ? AND global_time = ?",
- (self._community.database_id, buffer(message.authentication.member.mid), message.distribution.global_time)).next()
+ undone, = yield self._dispersy.database.stormdb.fetchone(
+ u"""
+ SELECT undone
+ FROM sync, member
+ WHERE sync.member = member.id AND community = ? AND mid = ? AND global_time = ?
+ """,
+ (self._community.database_id, buffer(message.authentication.member.mid),
+ message.distribution.global_time))
self._testclass.assertGreater(undone, 0, "Message is not undone")
if undone_by:
- undone, = self._dispersy.database.execute(
+ undone, = yield self._dispersy.database.stormdb.fetchone(
u"SELECT packet FROM sync WHERE id = ? ",
- (undone,)).next()
+ (undone,))
self._testclass.assertEqual(str(undone), undone_by.packet)
- except StopIteration:
+ except TypeError:
self._testclass.fail("Message is not stored")
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def assert_count(self, message, count):
- self._testclass.assertEqual(self.count_messages(message), count)
+ if self._dispersy.endpoint.received_packets:
+ yield self.process_packets()
+ message_count = yield self.count_messages(message)
+ self._testclass.assertEqual(message_count, count)
+ @inlineCallbacks
def send_identity(self, other):
- packets = self.fetch_packets([u"dispersy-identity", ], self.my_member.mid)
- other.give_packets(packets, self)
+ packets = yield self.fetch_packets([u"dispersy-identity", ], self.my_member.mid)
+ yield other.give_packets(packets, self)
- packets = other.fetch_packets([u"dispersy-identity", ], other.my_member.mid)
- self.give_packets(packets, other)
+ packets = yield other.fetch_packets([u"dispersy-identity", ], other.my_member.mid)
+ yield self.give_packets(packets, other)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def take_step(self):
- self._community.take_step()
+ yield self._community.take_step()
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def claim_global_time(self):
- return self._community.claim_global_time()
+ claimed_global_time = yield self._community.claim_global_time()
+ returnValue(claimed_global_time)
@blocking_call_on_reactor_thread
def get_resolution_policy(self, meta, global_time):
return self._community.timeline.get_resolution_policy(meta, global_time)
+ @inlineCallbacks
def call(self, func, *args, **kargs):
# TODO(emilon): timeout is not supported anymore, clean the tests so they don't pass the named argument.
if isInIOThread():
- return func(*args, **kargs)
+ func_result = yield maybeDeferred(func, *args, **kargs)
+ returnValue(func_result)
else:
- return blockingCallFromThread(reactor, func, *args, **kargs)
+ returnValue(blockingCallFromThread(reactor, func, *args, **kargs))
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def store(self, messages):
- self._dispersy._store(messages)
+ yield self._dispersy._store(messages)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_authorize(self, permission_triplets, global_time=None, sequence_number=None):
"""
Returns a new dispersy-authorize message.
@@ -415,53 +501,60 @@ def create_authorize(self, permission_triplets, global_time=None, sequence_numbe
meta = self._community.get_meta_message(u"dispersy-authorize")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
if sequence_number == None:
sequence_number = meta.distribution.claim_sequence_number()
- return meta.impl(authentication=(self._my_member,),
+ message = yield meta.impl(authentication=(self._my_member,),
distribution=(global_time, sequence_number),
payload=(permission_triplets,))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_revoke(self, permission_triplets, global_time=None, sequence_number=None):
meta = self._community.get_meta_message(u"dispersy-revoke")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
if sequence_number == None:
sequence_number = meta.distribution.claim_sequence_number()
- return meta.impl(authentication=(self._my_member,),
+ message = yield meta.impl(authentication=(self._my_member,),
distribution=(global_time, sequence_number),
payload=(permission_triplets,))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_dynamic_settings(self, policies, global_time=None, sequence_number=None):
meta = self._community.get_meta_message(u"dispersy-dynamic-settings")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
if sequence_number == None:
sequence_number = meta.distribution.claim_sequence_number()
- message = meta.impl(authentication=(self.my_member,),
+ message = yield meta.impl(authentication=(self.my_member,),
distribution=(global_time, sequence_number),
payload=(policies,))
- return message
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_destroy_community(self, degree, global_time=None):
meta = self._community.get_meta_message(u"dispersy-destroy-community")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(authentication=((self._my_member),),
+ message = yield meta.impl(authentication=((self._my_member),),
distribution=(global_time,),
payload=(degree,))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_identity(self, global_time=None):
"""
Returns a new dispersy-identity message.
@@ -469,11 +562,13 @@ def create_identity(self, global_time=None):
meta = self._community.get_meta_message(u"dispersy-identity")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(authentication=(self._my_member,), distribution=(global_time,))
+ message = yield meta.impl(authentication=(self._my_member,), distribution=(global_time,))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_undo_own(self, message, global_time=None, sequence_number=None):
"""
Returns a new dispersy-undo-own message.
@@ -482,15 +577,17 @@ def create_undo_own(self, message, global_time=None, sequence_number=None):
meta = self._community.get_meta_message(u"dispersy-undo-own")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
if sequence_number == None:
sequence_number = meta.distribution.claim_sequence_number()
- return meta.impl(authentication=(self._my_member,),
+ message = yield meta.impl(authentication=(self._my_member,),
distribution=(global_time, sequence_number),
payload=(message.authentication.member, message.distribution.global_time, message))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_undo_other(self, message, global_time=None, sequence_number=None):
"""
Returns a new dispersy-undo-other message.
@@ -498,15 +595,17 @@ def create_undo_other(self, message, global_time=None, sequence_number=None):
meta = self._community.get_meta_message(u"dispersy-undo-other")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
if sequence_number == None:
sequence_number = meta.distribution.claim_sequence_number()
- return meta.impl(authentication=(self._my_member,),
+ message = yield meta.impl(authentication=(self._my_member,),
distribution=(global_time, sequence_number),
payload=(message.authentication.member, message.distribution.global_time, message))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_missing_identity(self, dummy_member=None, global_time=None):
"""
Returns a new dispersy-missing-identity message.
@@ -515,12 +614,14 @@ def create_missing_identity(self, dummy_member=None, global_time=None):
meta = self._community.get_meta_message(u"dispersy-missing-identity")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(distribution=(global_time,),
+ message = yield meta.impl(distribution=(global_time,),
payload=(dummy_member.mid,))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_missing_sequence(self, missing_member, missing_message, missing_sequence_low, missing_sequence_high, global_time=None):
"""
Returns a new dispersy-missing-sequence message.
@@ -532,12 +633,14 @@ def create_missing_sequence(self, missing_member, missing_message, missing_seque
meta = self._community.get_meta_message(u"dispersy-missing-sequence")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(distribution=(global_time,),
+ message = yield meta.impl(distribution=(global_time,),
payload=(missing_member, missing_message, missing_sequence_low, missing_sequence_high))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_signature_request(self, identifier, message, global_time=None):
"""
Returns a new dispersy-signature-request message.
@@ -546,11 +649,14 @@ def create_signature_request(self, identifier, message, global_time=None):
meta = self._community.get_meta_message(u"dispersy-signature-request")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(distribution=(global_time,), payload=(identifier, message,))
+ message = yield meta.impl(distribution=(global_time,), payload=(identifier, message,))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
+ # TODO(Laurens): This method is never called inside dispersy.
def create_signature_response(self, identifier, message, global_time=None):
"""
Returns a new dispersy-missing-response message.
@@ -561,12 +667,14 @@ def create_signature_response(self, identifier, message, global_time=None):
meta = self._community.get_meta_message(u"dispersy-signature-response")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(distribution=(global_time,),
+ message = yield meta.impl(distribution=(global_time,),
payload=(identifier, message))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_missing_message(self, missing_member, missing_global_times, global_time=None):
"""
Returns a new dispersy-missing-message message.
@@ -576,12 +684,14 @@ def create_missing_message(self, missing_member, missing_global_times, global_ti
meta = self._community.get_meta_message(u"dispersy-missing-message")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(distribution=(global_time,),
+ message = yield meta.impl(distribution=(global_time,),
payload=(missing_member, missing_global_times))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_missing_proof(self, member, global_time=None):
"""
Returns a new dispersy-missing-proof message.
@@ -590,11 +700,13 @@ def create_missing_proof(self, member, global_time=None):
meta = self._community.get_meta_message(u"dispersy-missing-proof")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(distribution=(global_time,), payload=(member, global_time))
+ message = yield meta.impl(distribution=(global_time,), payload=(member, global_time))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def create_introduction_request(self, destination, source_lan, source_wan, advice, connection_type, sync, identifier, global_time=None):
"""
Returns a new dispersy-introduction-request message.
@@ -623,13 +735,16 @@ def create_introduction_request(self, destination, source_lan, source_wan, advic
meta = self._community.get_meta_message(u"dispersy-introduction-request")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(authentication=(self._my_member,),
+ message = yield meta.impl(authentication=(self._my_member,),
distribution=(global_time,),
payload=(destination.sock_addr, source_lan, source_wan, advice, connection_type, sync, identifier))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
+ # TODO(Laurens): This method is never callers/referenced in dispersy?
def create_introduction_response(self, destination, source_lan, source_wan, introduction_lan, introduction_wan, connection_type, tunnel, identifier, global_time=None):
"""
Returns a new dispersy-introduction-request message.
@@ -646,14 +761,16 @@ def create_introduction_response(self, destination, source_lan, source_wan, intr
meta = self._community.get_meta_message(u"dispersy-introduction-response")
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(authentication=(self._my_member,),
+ message = yield meta.impl(authentication=(self._my_member,),
destination=(destination,),
distribution=(global_time,),
payload=(destination.sock_addr, source_lan, source_wan, introduction_lan, introduction_wan, connection_type, tunnel, identifier))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def _create_text(self, message_name, text, global_time=None, resolution=(), destination=()):
assert isinstance(message_name, unicode), type(message_name)
assert isinstance(text, str), type(text)
@@ -663,15 +780,17 @@ def _create_text(self, message_name, text, global_time=None, resolution=(), dest
meta = self._community.get_meta_message(message_name)
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(authentication=(self._my_member,),
+ message = yield meta.impl(authentication=(self._my_member,),
resolution=resolution,
distribution=(global_time,),
destination=destination,
payload=(text,))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def _create_sequence_text(self, message_name, text, global_time=None, sequence_number=None):
assert isinstance(message_name, unicode)
assert isinstance(text, str)
@@ -679,15 +798,17 @@ def _create_sequence_text(self, message_name, text, global_time=None, sequence_n
meta = self._community.get_meta_message(message_name)
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
if sequence_number == None:
sequence_number = meta.distribution.claim_sequence_number()
- return meta.impl(authentication=(self._my_member,),
+ message = yield meta.impl(authentication=(self._my_member,),
distribution=(global_time, sequence_number),
payload=(text,))
+ returnValue(message)
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def _create_doublemember_text(self, message_name, other, text, global_time=None):
assert isinstance(message_name, unicode)
assert isinstance(other, Member)
@@ -696,128 +817,167 @@ def _create_doublemember_text(self, message_name, other, text, global_time=None)
# As each node has a separate database, a member instance from a node representing identity A can have the same
# database ID than one from a different node representing identity B, get our own member object based on
# `other`'s member ID to avoid this.
- my_other = self._dispersy.get_member(mid=other.mid)
+ my_other = yield self._dispersy.get_member(mid=other.mid)
meta = self._community.get_meta_message(message_name)
if global_time == None:
- global_time = self.claim_global_time()
+ global_time = yield self.claim_global_time()
- return meta.impl(authentication=([self._my_member, my_other],),
+ message = yield meta.impl(authentication=([self._my_member, my_other],),
distribution=(global_time,),
payload=(text,))
+ returnValue(message)
+ @inlineCallbacks
def create_last_1_test(self, text, global_time=None):
"""
Returns a new last-1-test message.
"""
- return self._create_text(u"last-1-test", text, global_time)
+ text = yield self._create_text(u"last-1-test", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_last_9_test(self, text, global_time=None):
"""
Returns a new last-9-test message.
"""
- return self._create_text(u"last-9-test", text, global_time)
+ text = yield self._create_text(u"last-9-test", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_last_1_doublemember_text(self, other, text, global_time=None):
"""
Returns a new last-1-doublemember-text message.
"""
- return self._create_doublemember_text(u"last-1-doublemember-text", other, text, global_time)
+ text = yield self._create_doublemember_text(u"last-1-doublemember-text", other, text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_double_signed_text(self, other, text, global_time=None):
"""
Returns a new double-signed-text message.
"""
- return self._create_doublemember_text(u"double-signed-text", other, text, global_time)
+ text = yield self._create_doublemember_text(u"double-signed-text", other, text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_double_signed_split_payload_text(self, other, text, global_time=None):
"""
Returns a new double-signed-text-split message.
"""
- return self._create_doublemember_text(u"double-signed-text-split", other, text, global_time)
+ text = yield self._create_doublemember_text(u"double-signed-text-split", other, text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_full_sync_text(self, text, global_time=None):
"""
Returns a new full-sync-text message.
"""
- return self._create_text(u"full-sync-text", text, global_time)
+ text = yield self._create_text(u"full-sync-text", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_bin_key_text(self, text, global_time=None):
"""
Returns a new full-sync-text message.
"""
- return self._create_text(u"bin-key-text", text, global_time)
+ text = yield self._create_text(u"bin-key-text", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_targeted_full_sync_text(self, text, destination, global_time=None):
"""
Returns a new targeted-full-sync-text message.
"""
- return self._create_text(u"full-sync-text", text, destination=destination, global_time=global_time)
+ text = yield self._create_text(u"full-sync-text", text, destination=destination, global_time=global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_full_sync_global_time_pruning_text(self, text, global_time=None):
"""
Returns a new full-sync-global-time-pruning-text message.
"""
- return self._create_text(u"full-sync-global-time-pruning-text", text, global_time)
+ text = yield self._create_text(u"full-sync-global-time-pruning-text", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_in_order_text(self, text, global_time=None):
"""
Returns a new ASC-text message.
"""
- return self._create_text(u"ASC-text", text, global_time)
+ text = yield self._create_text(u"ASC-text", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_out_order_text(self, text, global_time=None):
"""
Returns a new DESC-text message.
"""
- return self._create_text(u"DESC-text", text, global_time)
+ text = yield self._create_text(u"DESC-text", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_protected_full_sync_text(self, text, global_time=None):
"""
Returns a new protected-full-sync-text message.
"""
- return self._create_text(u"protected-full-sync-text", text, global_time)
+ text = yield self._create_text(u"protected-full-sync-text", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_dynamic_resolution_text(self, text, policy, global_time=None):
"""
Returns a new dynamic-resolution-text message.
"""
assert isinstance(policy, (PublicResolution.Implementation, LinearResolution.Implementation)), type(policy)
- return self._create_text(u"dynamic-resolution-text", text, global_time, resolution=(policy,))
+ text = yield self._create_text(u"dynamic-resolution-text", text, global_time, resolution=(policy,))
+ returnValue(text)
+ @inlineCallbacks
def create_sequence_text(self, text, global_time=None, sequence_number=None):
"""
Returns a new sequence-text message.
"""
- return self._create_sequence_text(u"sequence-text", text, global_time, sequence_number)
+ text = yield self._create_sequence_text(u"sequence-text", text, global_time, sequence_number)
+ returnValue(text)
+ @inlineCallbacks
def create_high_priority_text(self, text, global_time=None):
"""
Returns a new high-priority-text message.
"""
- return self._create_text(u"high-priority-text", text, global_time)
+ text = yield self._create_text(u"high-priority-text", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_low_priority_text(self, text, global_time=None):
"""
Returns a new low-priority-text message.
"""
- return self._create_text(u"low-priority-text", text, global_time)
+ text = yield self._create_text(u"low-priority-text", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_medium_priority_text(self, text, global_time=None):
"""
Returns a new medium-priority-text message.
"""
- return self._create_text(u"medium-priority-text", text, global_time)
+ text = yield self._create_text(u"medium-priority-text", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_random_order_text(self, text, global_time=None):
"""
Returns a new RANDOM-text message.
"""
- return self._create_text(u"RANDOM-text", text, global_time)
+ text = yield self._create_text(u"RANDOM-text", text, global_time)
+ returnValue(text)
+ @inlineCallbacks
def create_batched_text(self, text, global_time=None):
"""
Returns a new BATCHED-text message.
"""
- return self._create_text(u"batched-text", text, global_time)
+ text = yield self._create_text(u"batched-text", text, global_time)
+ returnValue(text)
diff --git a/tests/dispersytestclass.py b/tests/dispersytestclass.py
index ce3106dc..637d7bc2 100644
--- a/tests/dispersytestclass.py
+++ b/tests/dispersytestclass.py
@@ -1,18 +1,17 @@
-import os
import logging
-from unittest import TestCase
+import os
from tempfile import mkdtemp
+from unittest import TestCase
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
+from .debugcommunity.community import DebugCommunity
+from .debugcommunity.node import DebugNode
from ..discovery.community import PEERCACHE_FILENAME
from ..dispersy import Dispersy
-from ..endpoint import ManualEnpoint
+from ..endpoint import ManualEnpoint, TUNNEL_PREFIX
from ..util import blockingCallFromThread
-from .debugcommunity.community import DebugCommunity
-from .debugcommunity.node import DebugNode
-
# use logger.conf if it exists
if os.path.exists("logger.conf"):
@@ -63,6 +62,27 @@ def tearDown(self):
self._logger.warning("Failing")
assert not pending, "The reactor was not clean after shutting down all dispersy instances."
+ @inlineCallbacks
+ def send_packet(self, candidate, packet, prefix=None):
+ packet = (prefix or '') + packet
+
+ if len(packet) > 2 ** 16 - 60:
+ raise RuntimeError("UDP does not support %d byte packets" % len(packet))
+
+ data = TUNNEL_PREFIX + packet if candidate.tunnel else packet
+
+ for a_node in self.nodes:
+ complete_packets = []
+ if candidate == a_node.my_candidate:
+ complete_packets.append((candidate.sock_addr, data))
+ if complete_packets:
+ yield a_node._dispersy.endpoint.data_came_in(complete_packets)
+ returnValue(True)
+
+ def patch_send_packet_for_nodes(self):
+ for node in self.nodes:
+ node._dispersy.endpoint.send_packet = self.send_packet
+
def create_nodes(self, amount=1, store_identity=True, tunnel=False, community_class=DebugCommunity,
autoload_discovery=False, memory_database=True):
"""
@@ -87,11 +107,12 @@ def _create_nodes(amount, store_identity, tunnel, communityclass, autoload_disco
working_directory = unicode(mkdtemp(suffix="_dispersy_test_session"))
dispersy = Dispersy(ManualEnpoint(0), working_directory, **memory_database_argument)
- dispersy.start(autoload_discovery=autoload_discovery)
+ yield dispersy.initialize_statistics()
+ yield dispersy.start(autoload_discovery=autoload_discovery)
self.dispersy_objects.append(dispersy)
- node = self._create_node(dispersy, communityclass, self._mm)
+ node = yield self._create_node(dispersy, communityclass, self._mm)
yield node.init_my_member(tunnel=tunnel, store_identity=store_identity)
nodes.append(node)
@@ -101,5 +122,8 @@ def _create_nodes(amount, store_identity, tunnel, communityclass, autoload_disco
return blockingCallFromThread(reactor, _create_nodes, amount, store_identity, tunnel, community_class,
autoload_discovery, memory_database)
+ @inlineCallbacks
def _create_node(self, dispersy, community_class, c_master_member):
- return DebugNode(self, dispersy, community_class, c_master_member)
+ node = DebugNode(self, dispersy)
+ yield node.initialize(community_class, c_master_member)
+ returnValue(node)
diff --git a/tests/test_batch.py b/tests/test_batch.py
index 90b45413..29ddc9f3 100644
--- a/tests/test_batch.py
+++ b/tests/test_batch.py
@@ -1,4 +1,8 @@
-from time import time, sleep
+from time import time
+from twisted.internet.task import deferLater
+
+from nose.twistedtools import deferred, reactor
+from twisted.internet.defer import inlineCallbacks
from .dispersytestclass import DispersyTestFunc
@@ -10,37 +14,50 @@ def __init__(self, *args, **kargs):
self._big_batch_took = 0.0
self._small_batches_took = 0.0
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_one_batch(self):
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
+
+ messages = []
+ for i in range(10):
+ created_batched_text = yield node.create_batched_text("duplicates", i + 10)
+ messages.append(created_batched_text)
- messages = [node.create_batched_text("duplicates", i + 10) for i in range(10)]
- other.give_messages(messages, node, cache=True)
+ yield other.give_messages(messages, node, cache=True)
# no messages may be in the database, as they need to be batched
- other.assert_count(messages[0], 0)
+ yield other.assert_count(messages[0], 0)
- sleep(messages[0].meta.batch.max_window + 1.0)
+ yield deferLater(reactor, messages[0].meta.batch.max_window + 1.0, lambda: None)
# all of the messages must be stored in the database, as batch_window expired
- other.assert_count(messages[0], 10)
+ yield other.assert_count(messages[0], 10)
+ @deferred(timeout=20)
+ @inlineCallbacks
def test_multiple_batch(self):
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
+
+ messages = []
+ for i in range(10):
+ created_batched_text = yield node.create_batched_text("duplicates", i + 10)
+ messages.append(created_batched_text)
- messages = [node.create_batched_text("duplicates", i + 10) for i in range(10)]
for message in messages:
- other.give_message(message, node, cache=True)
+ yield other.give_message(message, node, cache=True)
# no messages may be in the database, as they need to be batched
- other.assert_count(message, 0)
-
- sleep(messages[0].meta.batch.max_window + 1.0)
+ yield other.assert_count(message, 0)
+ yield deferLater(reactor, messages[0].meta.batch.max_window + 1.0, lambda: None)
# all of the messages must be stored in the database, as batch_window expired
- other.assert_count(messages[0], 10)
+ yield other.assert_count(messages[0], 10)
+ @deferred(timeout=20)
+ @inlineCallbacks
def test_one_big_batch(self, length=1000):
"""
Test that one big batch of messages is processed correctly.
@@ -48,22 +65,26 @@ def test_one_big_batch(self, length=1000):
we make one large batch (using one community) and many small batches (using many different
communities).
"""
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
- messages = [node.create_full_sync_text("Dprint=False, big batch #%d" % global_time, global_time)
- for global_time in xrange(10, 10 + length)]
+ messages = []
+ for global_time in xrange(10, 10 + length):
+ created_full_sync_text = yield node.create_full_sync_text("Dprint=False, big batch #%d" % global_time, global_time)
+ messages.append(created_full_sync_text)
begin = time()
- other.give_messages(messages, node)
+ yield other.give_messages(messages, node)
end = time()
self._big_batch_took = end - begin
- other.assert_count(messages[0], len(messages))
+ yield other.assert_count(messages[0], len(messages))
if self._big_batch_took and self._small_batches_took:
self.assertSmaller(self._big_batch_took, self._small_batches_took * 1.1)
+ @deferred(timeout=40)
+ @inlineCallbacks
def test_many_small_batches(self, length=1000):
"""
Test that many small batches of messages are processed correctly.
@@ -71,19 +92,22 @@ def test_many_small_batches(self, length=1000):
we make one large batch (using one community) and many small batches (using many different
communities).
"""
- node, other = self.create_nodes(2)
- other.send_identity(node)
- messages = [node.create_full_sync_text("Dprint=False, big batch #%d" % global_time, global_time)
- for global_time in xrange(10, 10 + length)]
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
+
+ messages = []
+ for global_time in xrange(10, 10 + length):
+ created_full_sync_text = yield node.create_full_sync_text("Dprint=False, big batch #%d" % global_time, global_time)
+ messages.append(created_full_sync_text)
begin = time()
for message in messages:
- other.give_message(message, node)
+ yield other.give_message(message, node)
end = time()
self._small_batches_took = end - begin
- other.assert_count(messages[0], len(messages))
+ yield other.assert_count(messages[0], len(messages))
if self._big_batch_took and self._small_batches_took:
self.assertSmaller(self._big_batch_took, self._small_batches_took * 1.1)
diff --git a/tests/test_bootstrap.py b/tests/test_bootstrap.py
index f622ceac..2347ed0d 100644
--- a/tests/test_bootstrap.py
+++ b/tests/test_bootstrap.py
@@ -1,3 +1,4 @@
+import unittest
from collections import defaultdict
from copy import copy
from os import environ, getcwd, path
@@ -8,7 +9,7 @@
from unittest import skip, skipUnless
import logging
-from nose.twistedtools import reactor
+from nose.twistedtools import reactor, deferred
from twisted.internet.defer import Deferred, inlineCallbacks, returnValue
from twisted.internet.task import deferLater
@@ -29,6 +30,8 @@
class TestBootstrapServers(DispersyTestFunc):
+ @unittest.skip("Very unstable, ERRORS > 80% of the time")
+ @inlineCallbacks
def test_tracker(self):
"""
Runs tracker.py and connects to it.
@@ -84,30 +87,32 @@ def dispersy_enable_candidate_walker(self):
def dispersy_enable_candidate_walker_responses(self):
return True
- node, = self.create_nodes(1, community_class=Community, autoload_discovery=True)
+ node, = yield self.create_nodes(1, community_class=Community, autoload_discovery=True)
# node sends introduction request
destination = Candidate(tracker_address, False)
- node.send_message(node.create_introduction_request(destination=destination,
+ created_introduction_request = yield node.create_introduction_request(destination=destination,
source_lan=node.lan_address,
source_wan=node.wan_address,
advice=True,
connection_type=u"unknown",
sync=None,
identifier=4242,
- global_time=42),
- destination)
+ global_time=42)
+ yield node.send_message(created_introduction_request, destination)
# node receives missing identity
- _, message = node.receive_message(names=[u"dispersy-missing-identity"]).next()
+ received_messsage = yield node.receive_message(names=[u"dispersy-missing-identity"])
+ _, message = received_messsage.next()
self.assertEqual(message.payload.mid, node.my_member.mid)
- packet = node.fetch_packets([u"dispersy-identity", ], node.my_member.mid)[0]
- node.send_packet(packet, destination)
+ packets = yield node.fetch_packets([u"dispersy-identity", ], node.my_member.mid)
+ packet = packets[0]
+ yield node.send_packet(packet, destination)
- node.process_packets()
-
- _, message = node.receive_message(names=[u"dispersy-identity"]).next()
+ yield node.process_packets()
+ received_messsage = yield node.receive_message(names=[u"dispersy-identity"])
+ _, message = received_messsage.next()
finally:
self._logger.debug("terminate tracker")
@@ -147,7 +152,7 @@ def start_walking(self):
self._pcandidates = [self.get_candidate(address) for address in
set(self._dispersy._discovery_community.bootstrap.candidate_addresses)]
break
- yield deferLater(reactor, 1, lambda: None)
+ sleep(1)
else:
test.fail("No candidates discovered")
@@ -158,8 +163,8 @@ def start_walking(self):
self._pcandidates.sort(cmp=lambda a, b: cmp(a.sock_addr, b.sock_addr))
for _ in xrange(PING_COUNT):
- self.ping(time())
- yield deferLater(reactor, 1, lambda: None)
+ yield self.ping(time())
+ sleep(1)
self.summary()
self.test_d.callback(None)
@@ -174,11 +179,12 @@ def on_introduction_response(self, messages):
self._identifiers[candidate.sock_addr] = message.authentication.member.mid
return super(DebugCommunity, self).on_introduction_response(messages)
+ @inlineCallbacks
def ping(self, now):
self._logger.debug("PING")
self._pings_done += 1
for candidate in self._pcandidates:
- request = self.create_introduction_request(candidate, False)
+ request = yield self.create_introduction_request(candidate, False)
self._request[candidate.sock_addr][request.payload.identifier] = now
def summary(self):
@@ -234,11 +240,13 @@ def finish(self, request_count, min_response_count, max_rtt):
@inlineCallbacks
def do_pings():
dispersy = Dispersy(StandaloneEndpoint(0), u".", u":memory:")
- dispersy.start(autoload_discovery=True)
+ yield dispersy.initialize_statistics()
+ yield dispersy.start(autoload_discovery=True)
self.dispersy_objects.append(dispersy)
- community = PingCommunity.create_community(dispersy, dispersy.get_new_member())
+ new_dispersy_member = yield dispersy.get_new_member()
+ community = yield PingCommunity.create_community(dispersy, new_dispersy_member)
yield community.test_d
- dispersy.stop()
+ yield dispersy.stop()
returnValue(community)
assert_margin = 0.9
@@ -247,6 +255,8 @@ def do_pings():
# TODO(emilon): port this to twisted
@skip("The stress test is not actually a unittest")
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_perform_heavy_stress_test(self):
"""
Sends many a dispersy-introduction-request messages to a single tracker and counts how long
@@ -323,27 +333,31 @@ def check_introduction_response(self, messages):
yield DropMessage(message, "not doing anything in this script")
+ @inlineCallbacks
def prepare_ping(self, member):
self._my_member = member
try:
for candidate in self._pcandidates:
- request = self._dispersy.create_introduction_request(self, candidate, False, forward=False)
+ request = yield self._dispersy.create_introduction_request(self, candidate, False, forward=False)
self._queue.append((request.payload.identifier, request.packet, candidate))
finally:
self._my_member = self._original_my_member
+ @inlineCallbacks
def ping_from_queue(self, count):
for identifier, packet, candidate in self._queue[:count]:
- self._dispersy.endpoint.send([candidate], [packet])
+ yield self._dispersy.endpoint.send([candidate], [packet])
self._request[candidate.sock_addr][identifier] = time()
self._queue = self._queue[count:]
+ # TODO(Laurens): This method never gets called.
+ @inlineCallbacks
def ping(self, member):
self._my_member = member
try:
for candidate in self._pcandidates:
- request = self._dispersy.create_introduction_request(self, candidate, False)
+ request = yield self._dispersy.create_introduction_request(self, candidate, False)
self._request[candidate.sock_addr][request.payload.identifier] = time()
finally:
self._my_member = self._original_my_member
@@ -369,9 +383,15 @@ def summary(self):
self._logger.info("prepare communities, members, etc")
with self._dispersy.database:
candidates = [Candidate(("130.161.211.245", 6429), False)]
- communities = [PingCommunity.create_community(self._dispersy, self._my_member, candidates)
- for _ in xrange(COMMUNITIES)]
- members = [self._dispersy.get_new_member(u"low") for _ in xrange(MEMBERS)]
+ communities = []
+ for _ in xrange(COMMUNITIES):
+ community = yield PingCommunity.create_community(self._dispersy, self._my_member, candidates)
+ communities.append(community)
+
+ members = []
+ for _ in xrange(MEMBERS):
+ new_dispersy_member = yield self._dispersy.get_new_member(u"low")
+ members.append(new_dispersy_member)
for community in communities:
for member in members:
@@ -381,17 +401,17 @@ def summary(self):
for _ in xrange(ROUNDS):
for community in communities:
for member in members:
- community.prepare_ping(member)
+ yield community.prepare_ping(member)
sleep(5)
- sleep(15)
+ sleep(5)
self._logger.info("ping-ping")
BEGIN = time()
for _ in xrange(ROUNDS):
for community in communities:
for _ in xrange(MEMBERS / 100):
- community.ping_from_queue(100)
+ yield community.ping_from_queue(100)
sleep(0.1)
for community in communities:
@@ -405,5 +425,6 @@ def summary(self):
community.summary()
# cleanup
- community.create_destroy_community(u"hard-kill")
- self._dispersy.get_community(community.cid).unload_community()
+ yield community.create_destroy_community(u"hard-kill")
+ community = yield self._dispersy.get_community(community.cid)
+ community.unload_community()
diff --git a/tests/test_candidates.py b/tests/test_candidates.py
index ce179b2c..acd38ca4 100644
--- a/tests/test_candidates.py
+++ b/tests/test_candidates.py
@@ -10,6 +10,9 @@
from itertools import combinations, islice
from time import time
+from nose.twistedtools import deferred
+from twisted.internet.defer import inlineCallbacks, returnValue
+
from ..candidate import CANDIDATE_ELIGIBLE_DELAY
from ..tracker.community import TrackerCommunity
from ..util import blocking_call_on_reactor_thread
@@ -353,6 +356,7 @@ def generator():
with community.dispersy.database:
return list(generator())
+ @inlineCallbacks
def set_timestamps(self, candidates, all_flags):
assert isinstance(candidates, list)
assert isinstance(all_flags, list)
@@ -361,10 +365,11 @@ def set_timestamps(self, candidates, all_flags):
for flags, candidate in zip(all_flags, candidates):
member = [None]
+ @inlineCallbacks
def get_member():
if not member[0]:
- member[0] = self._dispersy.get_new_member(u"very-low")
- return member[0]
+ member[0] = yield self._dispersy.get_new_member(u"very-low")
+ returnValue(member[0])
if "w" in flags:
# SELF has performed an outgoing walk to CANDIDATE
@@ -373,20 +378,23 @@ def get_member():
if "r" in flags:
# SELF has received an incoming walk response from CANDIDATE
- candidate.associate(get_member())
+ new_dispersy_member = yield get_member()
+ candidate.associate(new_dispersy_member)
candidate.walk_response(now)
assert candidate.last_walk_reply == now
if "e" in flags:
# CANDIDATE_ELIGIBLE_DELAY seconds ago SELF performed a successful walk to CANDIDATE
- candidate.associate(get_member())
+ new_dispersy_member = yield get_member()
+ candidate.associate(new_dispersy_member)
candidate.walk(now - CANDIDATE_ELIGIBLE_DELAY)
candidate.walk_response(now)
assert candidate.last_walk_reply == now, (candidate.last_walk_reply)
if "s" in flags:
# SELF has received an incoming walk request from CANDIDATE
- candidate.associate(get_member())
+ new_dispersy_member = yield get_member()
+ candidate.associate(new_dispersy_member)
candidate.stumble(now)
assert candidate.last_stumble == now
@@ -400,7 +408,7 @@ def get_member():
candidate.discovered(now)
assert candidate.last_discovered == now
- return now
+ returnValue(now)
def select_candidates(self, candidates, all_flags):
def filter_func(flags):
@@ -477,6 +485,7 @@ def filter_func(flags, candidate):
return [candidate for flags, candidate in zip(all_flags, candidates) if filter_func(flags, candidate)]
@blocking_call_on_reactor_thread
+ @inlineCallbacks
def check_candidates(self, all_flags):
assert isinstance(all_flags, list)
assert all(isinstance(flags, str) for flags in all_flags)
@@ -502,18 +511,18 @@ def compare(selection, actual):
assert isinstance(max_calls, int)
assert isinstance(max_iterations, int)
assert len(all_flags) < max_iterations
- community = NoBootstrapDebugCommunity.create_community(self._dispersy, self._mm._my_member)
+ community = yield NoBootstrapDebugCommunity.create_community(self._dispersy, self._mm._my_member)
candidates = self.create_candidates(community, all_flags)
# yield_candidates
- self.set_timestamps(candidates, all_flags)
+ yield self.set_timestamps(candidates, all_flags)
selection = self.select_candidates(candidates, all_flags)
actual_list = [islice(community.dispersy_yield_candidates(), max_iterations) for _ in xrange(max_calls)]
for actual in actual_list:
compare(selection, actual)
# yield_verified_candidates
- self.set_timestamps(candidates, all_flags)
+ yield self.set_timestamps(candidates, all_flags)
selection = self.select_verified_candidates(candidates, all_flags)
actual_list = [islice(community.dispersy_yield_verified_candidates(), max_iterations)
for _ in xrange(max_calls)]
@@ -521,13 +530,13 @@ def compare(selection, actual):
compare(selection, actual)
# get_introduce_candidate (no exclusion)
- self.set_timestamps(candidates, all_flags)
+ yield self.set_timestamps(candidates, all_flags)
selection = self.select_introduce_candidates(candidates, all_flags) or [None]
actual = [community.dispersy_get_introduce_candidate() for _ in xrange(max_calls)]
compare(selection, actual)
# get_introduce_candidate (with exclusion)
- self.set_timestamps(candidates, all_flags)
+ yield self.set_timestamps(candidates, all_flags)
for candidate in candidates:
selection = self.select_introduce_candidates(candidates, all_flags, candidate) or [None]
actual = [community.dispersy_get_introduce_candidate(candidate) for _ in xrange(max_calls)]
@@ -536,7 +545,7 @@ def compare(selection, actual):
# get_walk_candidate
# Note that we must perform the CANDIDATE.WALK to ensure this candidate is not iterated again. Because of this,
# this test must be done last.
- self.set_timestamps(candidates, all_flags)
+ yield self.set_timestamps(candidates, all_flags)
selection = self.select_walk_candidates(candidates, all_flags)
for _ in xrange(len(selection)):
candidate = community.dispersy_get_walk_candidate()
@@ -549,25 +558,27 @@ def compare(selection, actual):
candidate = community.dispersy_get_walk_candidate()
self.assertEquals(candidate, None)
- @blocking_call_on_reactor_thread
- def test_get_introduce_candidate(self, community_create_method=DebugCommunity.create_community):
- community = community_create_method(self._dispersy, self._community._my_member)
- candidates = self.create_candidates(community, [""] * 5)
- expected = [None, ("127.0.0.1", 1), ("127.0.0.1", 2), ("127.0.0.1", 3), ("127.0.0.1", 4)]
- now = time()
- got = []
- for candidate in candidates:
- candidate.associate(self._dispersy.get_new_member(u"very-low"))
- candidate.stumble(now)
- introduce = community.dispersy_get_introduce_candidate(candidate)
- got.append(introduce.sock_addr if introduce else None)
- self.assertEquals(expected, got)
+ @deferred(timeout=10)
+ @inlineCallbacks
+ def test_tracker_get_introduce_candidate(self, community_create_method=TrackerCommunity.create_community):
+ @inlineCallbacks
+ def get_introduce_candidate(self, community_create_method=DebugCommunity.create_community):
+ community = yield community_create_method(self._dispersy, self._community._my_member)
+ candidates = self.create_candidates(community, [""] * 5)
+ expected = [None, ("127.0.0.1", 1), ("127.0.0.1", 2), ("127.0.0.1", 3), ("127.0.0.1", 4)]
+ now = time()
+ got = []
+ for candidate in candidates:
+ new_dispersy_member = yield self._dispersy.get_new_member(u"very-low")
+ candidate.associate(new_dispersy_member)
+ candidate.stumble(now)
+ introduce = community.dispersy_get_introduce_candidate(candidate)
+ got.append(introduce.sock_addr if introduce else None)
+ self.assertEquals(expected, got)
- return community, candidates
+ returnValue((community, candidates))
- @blocking_call_on_reactor_thread
- def test_tracker_get_introduce_candidate(self, community_create_method=TrackerCommunity.create_community):
- community, candidates = self.test_get_introduce_candidate(community_create_method)
+ community, candidates = yield get_introduce_candidate(self, community_create_method)
# trackers should not prefer either stumbled or walked candidates, i.e. it should not return
# candidate 1 more than once/in the wrong position
@@ -580,10 +591,11 @@ def test_tracker_get_introduce_candidate(self, community_create_method=TrackerCo
got.append(introduce.sock_addr if introduce else None)
self.assertEquals(expected, got)
- @blocking_call_on_reactor_thread
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_introduction_probabilities(self):
candidates = self.create_candidates(self._community, ["wr", "s"])
- self.set_timestamps(candidates, ["wr", "s"])
+ yield self.set_timestamps(candidates, ["wr", "s"])
# fetch candidates
returned_walked_candidate = 0
@@ -594,10 +606,11 @@ def test_introduction_probabilities(self):
assert returned_walked_candidate in expected_walked_range
- @blocking_call_on_reactor_thread
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_walk_probabilities(self):
candidates = self.create_candidates(self._community, ["e", "s", "i", "d"])
- self.set_timestamps(candidates, ["e", "s", "i", "d"])
+ yield self.set_timestamps(candidates, ["e", "s", "i", "d"])
# fetch candidates
returned_walked_candidate = 0
diff --git a/tests/test_classification.py b/tests/test_classification.py
index 497f9ee5..acfa37fa 100644
--- a/tests/test_classification.py
+++ b/tests/test_classification.py
@@ -1,3 +1,8 @@
+from nose.twistedtools import deferred, reactor
+from twisted.internet.defer import inlineCallbacks
+from twisted.internet.task import deferLater
+from twisted.python.threadable import isInIOThread
+
from ..exception import CommunityNotFoundException
from ..util import call_on_reactor_thread
from .debugcommunity.community import DebugCommunity
@@ -6,7 +11,8 @@
class TestClassification(DispersyTestFunc):
- @call_on_reactor_thread
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_reclassify_unloaded_community(self):
"""
Load a community, reclassify it, load all communities of that classification to check.
@@ -18,24 +24,27 @@ class ClassTestB(DebugCommunity):
pass
# create master member
- master = self._dispersy.get_new_member(u"high")
+ master = yield self._dispersy.get_new_member(u"high")
# create community
- self._dispersy.database.execute(u"INSERT INTO community (master, member, classification) VALUES (?, ?, ?)",
- (master.database_id, self._mm.my_member.database_id, ClassTestA.get_classification()))
+ yield self._dispersy.database.stormdb.insert(u"community",
+ master=master.database_id,
+ member=self._mm.my_member.database_id,
+ classification=ClassTestA.get_classification())
# reclassify
- community = self._dispersy.reclassify_community(master, ClassTestB)
+ community = yield self._dispersy.reclassify_community(master, ClassTestB)
self.assertIsInstance(community, ClassTestB)
self.assertEqual(community.cid, master.mid)
try:
- classification, = self._dispersy.database.execute(u"SELECT classification FROM community WHERE master = ?",
- (master.database_id,)).next()
- except StopIteration:
+ classification, = yield self._dispersy.database.stormdb.fetchone(
+ u"SELECT classification FROM community WHERE master = ?", (master.database_id,))
+ except TypeError:
self.fail()
self.assertEqual(classification, ClassTestB.get_classification())
- @call_on_reactor_thread
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_reclassify_loaded_community(self):
"""
Load a community, reclassify it, load all communities of that classification to check.
@@ -47,23 +56,25 @@ class ClassTestD(DebugCommunity):
pass
# create community
- community_c = ClassTestC.create_community(self._dispersy, self._mm._my_member)
- self.assertEqual(len(list(self._dispersy.database.execute(u"SELECT * FROM community WHERE classification = ?",
- (ClassTestC.get_classification(),)))), 1)
+ community_c = yield ClassTestC.create_community(self._dispersy, self._mm._my_member)
+ count, = yield self._dispersy.database.stormdb.fetchone(
+ u"SELECT COUNT(*) FROM community WHERE classification = ?", (ClassTestC.get_classification(),))
+ self.assertEqual(count, 1)
# reclassify
- community_d = self._dispersy.reclassify_community(community_c, ClassTestD)
+ community_d = yield self._dispersy.reclassify_community(community_c, ClassTestD)
self.assertIsInstance(community_d, ClassTestD)
self.assertEqual(community_c.cid, community_d.cid)
try:
- classification, = self._dispersy.database.execute(u"SELECT classification FROM community WHERE master = ?",
- (community_c.master_member.database_id,)).next()
- except StopIteration:
+ classification, = yield self._dispersy.database.stormdb.fetchone(
+ u"SELECT classification FROM community WHERE master = ?", (community_c.master_member.database_id,))
+ except TypeError:
self.fail()
self.assertEqual(classification, ClassTestD.get_classification())
- @call_on_reactor_thread
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_load_one_communities(self):
"""
Try to load communities of a certain classification while there is exactly one such
@@ -73,19 +84,23 @@ class ClassificationLoadOneCommunities(DebugCommunity):
pass
# create master member
- master = self._dispersy.get_new_member(u"high")
+ master = yield self._dispersy.get_new_member(u"high")
# create one community
- self._dispersy.database.execute(u"INSERT INTO community (master, member, classification) VALUES (?, ?, ?)",
- (master.database_id, self._mm._my_member.database_id, ClassificationLoadOneCommunities.get_classification()))
+ yield self._dispersy.database.stormdb.insert(u"community",
+ master=master.database_id,
+ member=self._mm.my_member.database_id,
+ classification=ClassificationLoadOneCommunities.get_classification())
# load one community
+ master_members = yield ClassificationLoadOneCommunities.get_master_members(self._dispersy)
communities = [ClassificationLoadOneCommunities(self._dispersy, master, self._mm._my_member)
- for master in ClassificationLoadOneCommunities.get_master_members(self._dispersy)]
+ for master in master_members]
self.assertEqual(len(communities), 1)
self.assertIsInstance(communities[0], ClassificationLoadOneCommunities)
- @call_on_reactor_thread
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_load_two_communities(self):
"""
Try to load communities of a certain classification while there is exactly two such
@@ -96,27 +111,32 @@ class LoadTwoCommunities(DebugCommunity):
masters = []
# create two communities
- community = LoadTwoCommunities.create_community(self._dispersy, self._mm.my_member)
+ community = yield LoadTwoCommunities.create_community(self._dispersy, self._mm.my_member)
masters.append(community.master_member.public_key)
community.unload_community()
- community = LoadTwoCommunities.create_community(self._dispersy, self._mm.my_member)
+ community = yield LoadTwoCommunities.create_community(self._dispersy, self._mm.my_member)
masters.append(community.master_member.public_key)
community.unload_community()
# load two communities
+ master_members = yield LoadTwoCommunities.get_master_members(self._dispersy)
self.assertEqual(sorted(masters), sorted(master.public_key
- for master in LoadTwoCommunities.get_master_members(self._dispersy)))
+ for master in master_members))
communities = [LoadTwoCommunities(self._dispersy, master, self._mm._my_member)
- for master in LoadTwoCommunities.get_master_members(self._dispersy)]
+ for master in master_members]
self.assertEqual(sorted(masters), sorted(community.master_member.public_key for community in communities))
self.assertEqual(len(communities), 2)
self.assertIsInstance(communities[0], LoadTwoCommunities)
self.assertIsInstance(communities[1], LoadTwoCommunities)
- @call_on_reactor_thread
- def test_enable_autoload(self, auto_load=True):
+ @deferred(timeout=10)
+ def test_enabled_autoload(self):
+ return self.autoload_community()
+
+ @inlineCallbacks
+ def autoload_community(self, auto_load=True):
"""
Test enable autoload.
@@ -132,33 +152,32 @@ def test_enable_autoload(self, auto_load=True):
my_member = self._community.my_member
# verify auto-load is enabled (default)
- self._community.dispersy_auto_load = auto_load
- self.assertEqual(self._community.dispersy_auto_load, auto_load)
+ yield self._community.set_dispersy_auto_load(auto_load)
+ is_auto_load = yield self._community.dispersy_auto_load
+ self.assertEqual(is_auto_load, auto_load)
if auto_load:
# define auto load
- self._dispersy.define_auto_load(DebugCommunity, my_member)
+ yield self._dispersy.define_auto_load(DebugCommunity, my_member)
# create wake-up message
- wakeup = self._mm.create_full_sync_text("Should auto-load", 42)
+ wakeup = yield self._mm.create_full_sync_text("Should auto-load", 42)
# unload community
self._community.unload_community()
try:
- self._dispersy.get_community(cid, auto_load=False)
+ yield self._dispersy.get_community(cid, auto_load=False)
self.fail()
except CommunityNotFoundException:
pass
# send wakeup message
- self._mm.give_message(wakeup, self._mm)
-
- yield 0.11
+ yield self._mm.give_message(wakeup, self._mm)
# verify that the community got auto-loaded
try:
- _ = self._dispersy.get_community(cid, auto_load=False)
+ yield self._dispersy.get_community(cid, auto_load=False)
if not auto_load:
self.fail('Should not have been loaded by wakeup message')
@@ -167,7 +186,8 @@ def test_enable_autoload(self, auto_load=True):
self.fail('Should have been loaded by wakeup message')
# verify that the message was received
- self._mm.assert_count(wakeup, 1 if auto_load else 0)
+ yield self._mm.assert_count(wakeup, 1 if auto_load else 0)
+ @deferred(timeout=10)
def test_enable_disable_autoload(self):
- self.test_enable_autoload(False)
+ return self.autoload_community(False)
diff --git a/tests/test_crypto.py b/tests/test_crypto.py
index 3e6feb5a..5708f75c 100644
--- a/tests/test_crypto.py
+++ b/tests/test_crypto.py
@@ -60,7 +60,6 @@ def test_serialise_binary(self):
def test_performance(self):
from time import time
- import sys
import os
ec = self.crypto.generate_key(u"very-low")
diff --git a/tests/test_database.py b/tests/test_database.py
new file mode 100644
index 00000000..4621f9c0
--- /dev/null
+++ b/tests/test_database.py
@@ -0,0 +1,66 @@
+import os
+import shutil
+from unittest import TestCase
+
+from nose.tools import raises
+from nose.twistedtools import deferred
+from twisted.internet.defer import inlineCallbacks
+
+from ..dispersydatabase import DispersyDatabase
+
+
+class TestDatabase(TestCase):
+ FILE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+ TEST_DATA_DIR = os.path.abspath(os.path.join(FILE_DIR, u"data"))
+ TMP_DATA_DIR = os.path.abspath(os.path.join(FILE_DIR, u"tmp"))
+
+ def setUp(self):
+ super(TestDatabase, self).setUp()
+
+ # Do not use an in-memory database. Different connections to the same
+ # in-memory database do not point towards the same database.
+ # http://stackoverflow.com/questions/3315046/sharing-a-memory-database-between-different-threads-in-python-using-sqlite3-pa
+ if not os.path.exists(self.TEST_DATA_DIR):
+ os.mkdir(self.TEST_DATA_DIR)
+
+ if not os.path.exists(self.TMP_DATA_DIR):
+ os.mkdir(self.TMP_DATA_DIR)
+
+ def tearDown(self):
+ super(TestDatabase, self).tearDown()
+ # Delete the database file if not using an in-memory database.
+ if os.path.exists(self.TMP_DATA_DIR):
+ shutil.rmtree(self.TMP_DATA_DIR, ignore_errors=True)
+
+ @raises(RuntimeError)
+ @deferred(timeout=10)
+ @inlineCallbacks
+ def test_unsupported_database_version(self):
+ minimum_version_path = os.path.abspath(os.path.join(self.TEST_DATA_DIR, u"dispersy_v1.db"))
+ tmp_path = os.path.join(self.TMP_DATA_DIR, u"dispersy.db")
+ shutil.copyfile(minimum_version_path, tmp_path)
+
+ database = DispersyDatabase(tmp_path)
+ yield database.open()
+
+ @deferred(timeout=10)
+ @inlineCallbacks
+ def test_upgrade_16_to_latest(self):
+ minimum_version_path = os.path.abspath(os.path.join(self.TEST_DATA_DIR, u"dispersy_v16.db"))
+ tmp_path = os.path.join(self.TMP_DATA_DIR, u"dispersy.db")
+ shutil.copyfile(minimum_version_path, tmp_path)
+
+ database = DispersyDatabase(tmp_path)
+ yield database.open()
+ self.assertEqual(database.database_version, 21)
+
+ @raises(RuntimeError)
+ @deferred(timeout=10)
+ @inlineCallbacks
+ def test_upgrade_version_too_high(self):
+ minimum_version_path = os.path.abspath(os.path.join(self.TEST_DATA_DIR, u"dispersy_v1337.db"))
+ tmp_path = os.path.join(self.TMP_DATA_DIR, u"dispersy.db")
+ shutil.copyfile(minimum_version_path, tmp_path)
+
+ database = DispersyDatabase(tmp_path)
+ yield database.open()
diff --git a/tests/test_destroycommunity.py b/tests/test_destroycommunity.py
index 496a527f..02e23e28 100644
--- a/tests/test_destroycommunity.py
+++ b/tests/test_destroycommunity.py
@@ -1,8 +1,14 @@
+from nose.twistedtools import deferred, reactor
+from twisted.internet.defer import inlineCallbacks
+from twisted.internet.task import deferLater
+
from .dispersytestclass import DispersyTestFunc
class TestDestroyCommunity(DispersyTestFunc):
+ @deferred(timeout=15)
+ @inlineCallbacks
def test_hard_kill(self):
"""
Test that a community can be hard killed and their messages will be dropped from the DB.
@@ -11,29 +17,31 @@ def test_hard_kill(self):
3. MM destroys the community.
4. Node wipes all messages from the community in the database.
"""
- node, = self.create_nodes(1)
+ node, = yield self.create_nodes(1)
- message = node.create_full_sync_text("Should be removed", 42)
- node.give_message(message, node)
+ message = yield node.create_full_sync_text("Should be removed", 42)
+ yield node.give_message(message, node)
- node.assert_count(message, 1)
+ yield node.assert_count(message, 1)
- dmessage = self._mm.create_destroy_community(u"hard-kill")
+ dmessage = yield self._mm.create_destroy_community(u"hard-kill")
- node.give_message(dmessage, self._mm)
+ yield node.give_message(dmessage, self._mm)
- node.assert_count(message, 0)
+ yield node.assert_count(message, 0)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_hard_kill_without_permission(self):
- node, other = self.create_nodes(2)
- node.send_identity(other)
+ node, other = yield self.create_nodes(2)
+ yield node.send_identity(other)
- message = node.create_full_sync_text("Should not be removed", 42)
- node.give_message(message, node)
+ message = yield node.create_full_sync_text("Should not be removed", 42)
+ yield node.give_message(message, node)
- node.assert_count(message, 1)
+ yield node.assert_count(message, 1)
- dmessage = other.create_destroy_community(u"hard-kill")
- node.give_message(dmessage, self._mm)
+ dmessage = yield other.create_destroy_community(u"hard-kill")
+ yield node.give_message(dmessage, self._mm)
- node.assert_count(message, 1)
+ yield node.assert_count(message, 1)
diff --git a/tests/test_discovery.py b/tests/test_discovery.py
index d4d78c75..f2bc0ffa 100644
--- a/tests/test_discovery.py
+++ b/tests/test_discovery.py
@@ -1,8 +1,12 @@
+from time import sleep
+
+from nose.twistedtools import deferred, reactor
+from twisted.internet.defer import inlineCallbacks
+from twisted.internet.task import deferLater
+
from .dispersytestclass import DispersyTestFunc
-from ..discovery.community import DiscoveryCommunity, BOOTSTRAP_FILE_ENVNAME
from ..discovery.bootstrap import _DEFAULT_ADDRESSES
-import os
-import time
+from ..discovery.community import DiscoveryCommunity
class TestDiscovery(DispersyTestFunc):
@@ -12,21 +16,25 @@ def setUp(self):
_DEFAULT_ADDRESSES.pop()
super(TestDiscovery, self).setUp()
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_overlap(self):
def get_preferences():
return ['0' * 20, '1' * 20]
self._community.my_preferences = get_preferences
- node, = self.create_nodes(1)
+ node, = yield self.create_nodes(1)
node._community.my_preferences = get_preferences
- node.process_packets()
- self._mm.process_packets()
- time.sleep(1)
+ yield node.process_packets()
+ yield self._mm.process_packets()
+ yield deferLater(reactor, 1.0, lambda: None)
assert node._community.is_taste_buddy_mid(self._mm.my_mid)
assert self._mm._community.is_taste_buddy_mid(node.my_mid)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_introduction(self):
def get_preferences(node_index):
return [str(i) * 20 for i in range(node_index, node_index + 2)]
@@ -39,27 +47,28 @@ def get_most_similar(orig_method, candidate):
self._community.my_preferences = lambda: get_preferences(0)
- node, = self.create_nodes(1)
+ node, = yield self.create_nodes(1)
node._community.my_preferences = lambda: get_preferences(1)
- node.process_packets()
- self._mm.process_packets()
- time.sleep(1)
+ yield node.process_packets()
+ yield self._mm.process_packets()
+ yield deferLater(reactor, 1.0, lambda: None)
assert node._community.is_taste_buddy_mid(self._mm.my_mid)
assert self._mm._community.is_taste_buddy_mid(node.my_mid)
- other, = self.create_nodes(1)
+ other, = yield self.create_nodes(1)
other._community.my_preferences = lambda: get_preferences(2)
orig_method = other._community.get_most_similar
other._community.get_most_similar = lambda candidate: get_most_similar(orig_method, candidate)
other._community.add_discovered_candidate(self._mm.my_candidate)
- other.take_step()
+ # This calls take_step in debug node. This is wrapped in @blockincallfromthread so it's synchronous.
+ yield other.take_step()
- self._mm.process_packets()
- other.process_packets()
- time.sleep(1)
+ yield self._mm.process_packets()
+ yield other.process_packets()
+ yield deferLater(reactor, 1.0, lambda: None)
# other and mm should not be taste buddies
assert not other._community.is_taste_buddy_mid(self._mm.my_mid)
diff --git a/tests/test_double_signature.py b/tests/test_double_signature.py
index 1b91dff9..d3da5d4a 100644
--- a/tests/test_double_signature.py
+++ b/tests/test_double_signature.py
@@ -1,31 +1,37 @@
-from time import sleep
+from nose.twistedtools import deferred, reactor
+from twisted.internet.defer import inlineCallbacks
+from twisted.internet.task import deferLater
from .dispersytestclass import DispersyTestFunc
+
class TestDoubleSign(DispersyTestFunc):
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_no_response_from_node(self):
"""
OTHER will request a signature from NODE. NODE will ignore this request and SELF should get
a timeout on the signature request after a few seconds.
"""
container = {"timeout": 0}
-
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
def on_response(request, response, modified):
self.assertIsNone(response)
container["timeout"] += 1
return False
- message = other.create_double_signed_text(node.my_pub_member, "Allow=True")
- other.call(other._community.create_signature_request, node.my_candidate, message, on_response, timeout=1.0)
+ message = yield other.create_double_signed_text(node.my_pub_member, "Allow=True")
+ yield other.call(other._community.create_signature_request, node.my_candidate, message, on_response, timeout=1.0)
- sleep(1.5)
+ yield deferLater(reactor, 1.5, lambda: None)
self.assertEqual(container["timeout"], 1)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_response_from_node(self):
"""
NODE will request a signature from OTHER.
@@ -33,11 +39,11 @@ def test_response_from_node(self):
"""
container = {"response": 0}
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
def on_response(request, response, modified):
- self.assertNotEqual(response, None)
+ self.assertIsNotNone(response)
self.assertEqual(container["response"], 0)
mid_signatures = dict([(member.mid, signature) for signature, member in response.authentication.signed_members])
@@ -54,11 +60,12 @@ def on_response(request, response, modified):
return False
# NODE creates the unsigned request and sends it to OTHER
- message = node.create_double_signed_text(other.my_pub_member, "Allow=True")
- node.call(node._community.create_signature_request, other.my_candidate, message, on_response, timeout=1.0)
+ message = yield node.create_double_signed_text(other.my_pub_member, "Allow=True")
+ yield node.call(node._community.create_signature_request, other.my_candidate, message, on_response, timeout=1.0)
# OTHER receives the request
- _, message = other.receive_message(names=[u"dispersy-signature-request"]).next()
+ received_message = yield other.receive_message(names=[u"dispersy-signature-request"])
+ _, message = received_message.next()
submsg = message.payload.message
second_signature_offset = len(submsg.packet) - other.my_member.signature_length
@@ -67,13 +74,15 @@ def on_response(request, response, modified):
self.assertEqual(submsg.packet[second_signature_offset:], "\x00" * other.my_member.signature_length, "The second signature MUST BE 0x00's.")
# reply sent by OTHER is ok, give it to NODE to process it
- other.give_message(message, node)
- node.process_packets()
+ yield other.give_message(message, node)
+ yield node.process_packets()
- sleep(1.5)
+ yield deferLater(reactor, 1.5, lambda: None)
self.assertEqual(container["response"], 1)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_modified_response_from_node(self):
"""
NODE will request a signature from OTHER.
@@ -81,11 +90,11 @@ def test_modified_response_from_node(self):
"""
container = {"response": 0}
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
def on_response(request, response, modified):
- self.assertNotEqual(response, None)
+ self.assertIsNotNone(response)
self.assertEqual(container["response"], 0)
mid_signatures = dict([(member.mid, signature) for signature, member in response.authentication.signed_members])
@@ -105,11 +114,12 @@ def on_response(request, response, modified):
return False
# NODE creates the unsigned request and sends it to OTHER
- message = node.create_double_signed_text(other.my_pub_member, "Allow=Modify")
- node.call(node._community.create_signature_request, other.my_candidate, message, on_response, timeout=1.0)
+ message = yield node.create_double_signed_text(other.my_pub_member, "Allow=Modify")
+ yield node.call(node._community.create_signature_request, other.my_candidate, message, on_response, timeout=1.0)
# OTHER receives the request
- _, message = other.receive_message(names=[u"dispersy-signature-request"]).next()
+ received_message = yield other.receive_message(names=[u"dispersy-signature-request"])
+ _, message = received_message.next()
submsg = message.payload.message
second_signature_offset = len(submsg.packet) - other.my_member.signature_length
@@ -118,13 +128,15 @@ def on_response(request, response, modified):
self.assertEqual(submsg.packet[second_signature_offset:], "\x00" * other.my_member.signature_length, "The second signature MUST BE 0x00's.")
# reply sent by OTHER is ok, give it to NODE to process it
- other.give_message(message, node)
- node.process_packets()
+ yield other.give_message(message, node)
+ yield node.process_packets()
- sleep(1.5)
+ yield deferLater(reactor, 1.5, lambda: None)
self.assertEqual(container["response"], 1)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_append_response_from_node(self):
"""
NODE will request a signature from OTHER.
@@ -132,8 +144,9 @@ def test_append_response_from_node(self):
"""
container = {"response": 0}
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
+
def on_response(request, response, modified):
self.assertNotEqual(response, None)
@@ -155,11 +168,12 @@ def on_response(request, response, modified):
return False
# NODE creates the unsigned request and sends it to OTHER
- message = node.create_double_signed_split_payload_text(other.my_pub_member, "Allow=Append,")
- node.call(node._community.create_signature_request, other.my_candidate, message, on_response, timeout=1.0)
+ message = yield node.create_double_signed_split_payload_text(other.my_pub_member, "Allow=Append,")
+ yield node.call(node._community.create_signature_request, other.my_candidate, message, on_response, timeout=1.0)
# OTHER receives the request
- _, message = other.receive_message(names=[u"dispersy-signature-request"]).next()
+ received_message = yield other.receive_message(names=[u"dispersy-signature-request"])
+ _, message = received_message.next()
submsg = message.payload.message
second_signature_offset = len(submsg.packet) - other.my_member.signature_length
@@ -168,9 +182,9 @@ def on_response(request, response, modified):
self.assertEqual(submsg.packet[second_signature_offset:], "\x00" * other.my_member.signature_length, "The second signature MUST BE 0x00's.")
# reply sent by OTHER is ok, give it to NODE to process it
- other.give_message(message, node)
- node.process_packets()
+ yield other.give_message(message, node)
+ yield node.process_packets()
- sleep(1.5)
+ yield deferLater(reactor, 1.5, lambda: None)
self.assertEqual(container["response"], 1)
diff --git a/tests/test_dynamicsettings.py b/tests/test_dynamicsettings.py
index 0cea6073..a86a4d12 100644
--- a/tests/test_dynamicsettings.py
+++ b/tests/test_dynamicsettings.py
@@ -1,14 +1,20 @@
+from nose.twistedtools import deferred, reactor
+from twisted.internet.defer import inlineCallbacks
+from twisted.internet.task import deferLater
+
from ..resolution import PublicResolution, LinearResolution
from .dispersytestclass import DispersyTestFunc
class TestDynamicSettings(DispersyTestFunc):
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_default_resolution(self):
"""
Ensure that the default resolution policy is used first.
"""
- other, = self.create_nodes(1)
+ other, = yield self.create_nodes(1)
meta = self._community.get_meta_message(u"dynamic-resolution-text")
@@ -18,17 +24,19 @@ def test_default_resolution(self):
self.assertEqual(proof, [])
# NODE creates a message (should allow, because the default policy is PublicResolution)
- message = self._mm.create_dynamic_resolution_text("Message #%d" % 10, policy.implement(), 10)
- other.give_message(message, self._mm)
+ message = yield self._mm.create_dynamic_resolution_text("Message #%d" % 10, policy.implement(), 10)
+ yield other.give_message(message, self._mm)
- other.assert_is_stored(message)
+ yield other.assert_is_stored(message)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_change_resolution(self):
"""
Change the resolution policy from default to linear.
"""
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
meta = node._community.get_meta_message(u"dynamic-resolution-text")
linear = meta.resolution.policies[1]
@@ -38,30 +46,32 @@ def test_change_resolution(self):
self.assertIsInstance(public_policy, PublicResolution)
# change and check policy
- message = self._mm.create_dynamic_settings([(meta, linear)], 42)
- self._mm.give_message(message, self._mm)
- node.give_message(message, self._mm)
- other.give_message(message, self._mm)
+ message = yield self._mm.create_dynamic_settings([(meta, linear)], 42)
+ yield self._mm.give_message(message, self._mm)
+ yield node.give_message(message, self._mm)
+ yield other.give_message(message, self._mm)
linear_policy, proof = node.get_resolution_policy(meta, 43)
self.assertIsInstance(linear_policy, LinearResolution)
self.assertEqual(proof[0].distribution.global_time, message.distribution.global_time)
# NODE creates a message (should allow), linear policy takes effect at globaltime + 1
- message = node.create_dynamic_resolution_text("Message #%d" % 42, public_policy.implement(), 42)
- other.give_message(message, node)
- other.assert_is_stored(message)
+ message = yield node.create_dynamic_resolution_text("Message #%d" % 42, public_policy.implement(), 42)
+ yield other.give_message(message, node)
+ yield other.assert_is_stored(message)
# NODE creates another message (should drop), linear policy in effect
- message = node.create_dynamic_resolution_text("Message #%d" % 43, public_policy.implement(), 43)
- other.give_message(message, node)
- other.assert_not_stored(message)
+ message = yield node.create_dynamic_resolution_text("Message #%d" % 43, public_policy.implement(), 43)
+ yield other.give_message(message, node)
+ yield other.assert_not_stored(message)
# NODE creates another message, correct policy (should drop), no permissions
- message = node.create_dynamic_resolution_text("Message #%d" % 44, linear_policy.implement(), 44)
- other.give_message(message, node)
- other.assert_not_stored(message)
+ message = yield node.create_dynamic_resolution_text("Message #%d" % 44, linear_policy.implement(), 44)
+ yield other.give_message(message, node)
+ yield other.assert_not_stored(message)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_change_resolution_undo(self):
"""
Change the resolution policy from default to linear, the messages already accepted should be
@@ -72,16 +82,16 @@ def check_policy(time_low, time_high, meta, policyclass):
policy, _ = other.get_resolution_policy(meta, global_time)
self.assertIsInstance(policy, policyclass)
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
meta = self._community.get_meta_message(u"dynamic-resolution-text")
public = meta.resolution.policies[0]
linear = meta.resolution.policies[1]
# create policy change, but do not yet process
- policy_linear = self._mm.create_dynamic_settings([(meta, linear)], 11) # hence the linear policy starts at 12
- policy_public = self._mm.create_dynamic_settings([(meta, public)], 21) # hence the public policy starts at 22
+ policy_linear = yield self._mm.create_dynamic_settings([(meta, linear)], 11) # hence the linear policy starts at 12
+ policy_public = yield self._mm.create_dynamic_settings([(meta, public)], 21) # hence the public policy starts at 22
# because above policy changes were not applied (i.e. update=False) everything is still
# PublicResolution without any proof
@@ -91,28 +101,30 @@ def check_policy(time_low, time_high, meta, policyclass):
meta = node._community.get_meta_message(u"dynamic-resolution-text")
public = meta.resolution.policies[0]
- tmessage = node.create_dynamic_resolution_text("Message #%d" % 25, public.implement(), 25)
- other.give_message(tmessage, node)
- other.assert_is_stored(tmessage)
+ tmessage = yield node.create_dynamic_resolution_text("Message #%d" % 25, public.implement(), 25)
+ yield other.give_message(tmessage, node)
+ yield other.assert_is_stored(tmessage)
# process the policy change
- other.give_message(policy_linear, self._mm)
+ yield other.give_message(policy_linear, self._mm)
check_policy(1, 12, meta, PublicResolution)
check_policy(12, 32, meta, LinearResolution)
# policy change should have undone the tmessage
- other.assert_is_undone(tmessage)
+ yield other.assert_is_undone(tmessage)
# process the policy change
- other.give_message(policy_public, self._mm)
+ yield other.give_message(policy_public, self._mm)
check_policy(1, 12, meta, PublicResolution)
check_policy(12, 22, meta, LinearResolution)
check_policy(22, 32, meta, PublicResolution)
# policy change should have redone the tmessage
- other.assert_is_done(tmessage)
+ yield other.assert_is_stored(tmessage)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_change_resolution_reject(self):
"""
Change the resolution policy from default to linear and back, to see if other requests the proof
@@ -122,23 +134,23 @@ def check_policy(time_low, time_high, meta, policyclass):
policy, _ = other.get_resolution_policy(meta, global_time)
self.assertIsInstance(policy, policyclass)
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
meta = self._community.get_meta_message(u"dynamic-resolution-text")
public = meta.resolution.policies[0]
linear = meta.resolution.policies[1]
# create policy change, but do not yet process
- policy_linear = self._mm.create_dynamic_settings([(meta, linear)], 11) # hence the linear policy starts at 12
- policy_public = self._mm.create_dynamic_settings([(meta, public)], 21) # hence the public policy starts at 22
+ policy_linear = yield self._mm.create_dynamic_settings([(meta, linear)], 11) # hence the linear policy starts at 12
+ policy_public = yield self._mm.create_dynamic_settings([(meta, public)], 21) # hence the public policy starts at 22
# because above policy changes were not applied (i.e. update=False) everything is still
# PublicResolution without any proof
check_policy(1, 32, meta, PublicResolution)
# process the policy change
- other.give_message(policy_linear, self._mm)
+ yield other.give_message(policy_linear, self._mm)
check_policy(1, 12, meta, PublicResolution)
check_policy(12, 32, meta, LinearResolution)
@@ -146,13 +158,16 @@ def check_policy(time_low, time_high, meta, policyclass):
meta = node._community.get_meta_message(u"dynamic-resolution-text")
public = meta.resolution.policies[0]
- tmessage = node.create_dynamic_resolution_text("Message #%d" % 25, public.implement(), 25)
- other.give_message(tmessage, node)
+ tmessage = yield node.create_dynamic_resolution_text("Message #%d" % 25, public.implement(), 25)
+ yield other.give_message(tmessage, node)
- _, message = node.receive_message(names=[u"dispersy-missing-proof"]).next()
- other.give_message(policy_public, self._mm)
- other.assert_is_done(tmessage)
+ received_message = yield node.receive_message(names=[u"dispersy-missing-proof"])
+ _, message = received_message.next()
+ yield other.give_message(policy_public, self._mm)
+ yield other.assert_is_stored(tmessage)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_change_resolution_send_proof(self):
"""
Change the resolution policy from default to linear and back, to see if other sends the proofs
@@ -162,20 +177,20 @@ def check_policy(time_low, time_high, meta, policyclass):
policy, _ = other.get_resolution_policy(meta, global_time)
self.assertIsInstance(policy, policyclass)
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
meta = self._community.get_meta_message(u"dynamic-resolution-text")
public = meta.resolution.policies[0]
linear = meta.resolution.policies[1]
# create policy change, but do not yet process
- policy_linear = self._mm.create_dynamic_settings([(meta, linear)], 11) # hence the linear policy starts at 12
- policy_public = self._mm.create_dynamic_settings([(meta, public)], 21) # hence the public policy starts at 22
+ policy_linear = yield self._mm.create_dynamic_settings([(meta, linear)], 11) # hence the linear policy starts at 12
+ policy_public = yield self._mm.create_dynamic_settings([(meta, public)], 21) # hence the public policy starts at 22
# process both policy changes
- other.give_message(policy_linear, self._mm)
- other.give_message(policy_public, self._mm)
+ yield other.give_message(policy_linear, self._mm)
+ yield other.give_message(policy_public, self._mm)
check_policy(1, 12, meta, PublicResolution)
check_policy(12, 22, meta, LinearResolution)
@@ -185,8 +200,9 @@ def check_policy(time_low, time_high, meta, policyclass):
meta = node._community.get_meta_message(u"dynamic-resolution-text")
public = meta.resolution.policies[0]
- tmessage = node.create_dynamic_resolution_text("Message #%d" % 12, public.implement(), 12)
- other.give_message(tmessage, node)
+ tmessage = yield node.create_dynamic_resolution_text("Message #%d" % 12, public.implement(), 12)
+ yield other.give_message(tmessage, node)
- _, message = node.receive_message(names=[u"dispersy-dynamic-settings"]).next()
+ received_message = yield node.receive_message(names=[u"dispersy-dynamic-settings"])
+ _, message = received_message.next()
assert message
diff --git a/tests/test_endpoint.py b/tests/test_endpoint.py
new file mode 100644
index 00000000..84566003
--- /dev/null
+++ b/tests/test_endpoint.py
@@ -0,0 +1,68 @@
+from tempfile import mkdtemp
+
+from ..dispersy import Dispersy
+from ..candidate import Candidate
+from ..endpoint import NullEndpoint
+from ..tests.dispersytestclass import DispersyTestFunc
+
+
+class TestSync(DispersyTestFunc):
+ """
+ This class contains tests that test the various endpoints
+ """
+
+ def setUp(self):
+ super(TestSync, self).setUp()
+ self.nodes = []
+
+ def test_null_endpoint_address(self):
+ """
+ Test that the default address is returned
+ """
+ null_endpoint = NullEndpoint()
+
+ self.assertEqual(null_endpoint.get_address(), ("0.0.0.0", 42))
+
+ def test_null_endpoint_set_address(self):
+ """
+ Test that the det address tuple is returned when set.
+ """
+ null_endpoint = NullEndpoint(address=("127.0.0.1", 1337))
+
+ self.assertEqual(null_endpoint.get_address(), ("127.0.0.1", 1337))
+
+ def test_null_endpoint_listen_to(self):
+ """
+ Tests that null endpoint listen_to does absolutely nothing
+ """
+ null_endpoint = NullEndpoint()
+ memory_database_argument = {'database_filename': u":memory:"}
+ working_directory = unicode(mkdtemp(suffix="_dispersy_test_session"))
+
+ dispersy = Dispersy(null_endpoint, working_directory, **memory_database_argument)
+ dispersy.initialize_statistics()
+ null_endpoint.open(dispersy)
+ null_endpoint.listen_to(None, None)
+
+ self.assertEqual(dispersy.statistics.total_up, 0)
+
+ def test_null_endpoint_send_packet(self):
+ """
+ Test that send packet raises the dispersy statistic' s total_up
+ """
+ null_endpoint = NullEndpoint()
+ memory_database_argument = {'database_filename': u":memory:"}
+ working_directory = unicode(mkdtemp(suffix="_dispersy_test_session"))
+
+ dispersy = Dispersy(null_endpoint, working_directory, **memory_database_argument)
+ dispersy.initialize_statistics()
+ null_endpoint.open(dispersy)
+
+ packet = "Fake packet"
+ candidate = Candidate(("197.168.0.1", 42), False)
+
+ null_endpoint.send([candidate], [packet])
+
+ expected_up = len(packet)
+
+ self.assertEqual(dispersy.statistics.total_up, expected_up)
diff --git a/tests/test_identicalpayload.py b/tests/test_identicalpayload.py
index 07d1ed4f..1b15ed8a 100644
--- a/tests/test_identicalpayload.py
+++ b/tests/test_identicalpayload.py
@@ -1,44 +1,55 @@
+from twisted.internet.task import deferLater
+
+from nose.twistedtools import deferred, reactor
+from twisted.internet.defer import inlineCallbacks
+
from .dispersytestclass import DispersyTestFunc
class TestIdenticalPayload(DispersyTestFunc):
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_drop_identical_payload(self):
"""
NODE creates two messages with the same community/member/global-time.
Sends both of them to OTHER, which should drop the "lowest" one.
"""
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
# create messages
messages = []
- messages.append(node.create_full_sync_text("Identical payload message", 42))
- messages.append(node.create_full_sync_text("Identical payload message", 42))
+ created_full_sync_text1 = yield node.create_full_sync_text("Identical payload message", 42)
+ messages.append(created_full_sync_text1)
+ created_full_sync_text2 = yield node.create_full_sync_text("Identical payload message", 42)
+ messages.append(created_full_sync_text2)
self.assertNotEqual(messages[0].packet, messages[1].packet, "the signature must make the messages unique")
# sort. we now know that the first message must be dropped
messages.sort(key=lambda x: x.packet)
# give messages in different batches
- other.give_message(messages[0], node)
- other.give_message(messages[1], node)
+ yield other.give_message(messages[0], node)
+ yield other.give_message(messages[1], node)
- other.assert_not_stored(messages[0])
- other.assert_is_stored(messages[1])
+ yield other.assert_not_stored(messages[0])
+ yield other.assert_is_stored(messages[1])
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_drop_identical(self):
"""
NODE creates one message, sends it to OTHER twice
"""
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
# create messages
- message = node.create_full_sync_text("Message", 42)
+ message = yield node.create_full_sync_text("Message", 42)
# give messages to other
- other.give_message(message, node)
- other.give_message(message, node)
+ yield other.give_message(message, node)
+ yield other.give_message(message, node)
- other.assert_is_stored(message)
+ yield other.assert_is_stored(message)
diff --git a/tests/test_member.py b/tests/test_member.py
index 37681198..e0dc331b 100644
--- a/tests/test_member.py
+++ b/tests/test_member.py
@@ -1,20 +1,26 @@
+from nose.twistedtools import deferred
+from twisted.internet.defer import inlineCallbacks
+
from .dispersytestclass import DispersyTestFunc
from ..util import call_on_reactor_thread
class TestMember(DispersyTestFunc):
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_verify(self):
- self._test_verify(u"medium")
- self._test_verify(u"curve25519")
+ yield self._test_verify(u"medium")
+ yield self._test_verify(u"curve25519")
@call_on_reactor_thread
+ @inlineCallbacks
def _test_verify(self, curve):
"""
Test test member.verify assuming create_signature works properly.
"""
ec = self._dispersy.crypto.generate_key(curve)
- member = self._dispersy.get_member(private_key=self._dispersy.crypto.key_to_bin(ec))
+ member = yield self._dispersy.get_member(private_key=self._dispersy.crypto.key_to_bin(ec))
# sign and verify "0123456789"[0:10]
self.assertTrue(member.verify("0123456789", self._dispersy.crypto.create_signature(ec, "0123456789")))
@@ -69,17 +75,20 @@ def _test_verify(self, curve):
self.assertFalse(member.verify("0123456789E", self._dispersy.crypto.create_signature(ec, "12345678"), offset=1, length=666))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_sign(self):
- self._test_sign(u"medium")
- self._test_sign(u"curve25519")
+ yield self._test_sign(u"medium")
+ yield self._test_sign(u"curve25519")
@call_on_reactor_thread
+ @inlineCallbacks
def _test_sign(self, curve):
"""
Test test member.sign assuming is_valid_signature works properly.
"""
ec = self._dispersy.crypto.generate_key(curve)
- member = self._dispersy.get_member(private_key=self._dispersy.crypto.key_to_bin(ec))
+ member = yield self._dispersy.get_member(private_key=self._dispersy.crypto.key_to_bin(ec))
# sign and verify "0123456789"[0:10]
self.assertTrue(self._dispersy.crypto.is_valid_signature(ec, "0123456789", member.sign("0123456789")))
diff --git a/tests/test_missingidentity.py b/tests/test_missingidentity.py
index 81c46c1f..e470fd99 100644
--- a/tests/test_missingidentity.py
+++ b/tests/test_missingidentity.py
@@ -1,77 +1,89 @@
+from twisted.internet.task import deferLater
+
+from nose.twistedtools import deferred, reactor
+from twisted.internet.defer import inlineCallbacks
+
from .dispersytestclass import DispersyTestFunc
class TestMissingIdentity(DispersyTestFunc):
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_incoming_missing_identity(self):
"""
NODE generates a missing-identity message and OTHER responds.
"""
- node, other = self.create_nodes(2)
- node.send_identity(other)
+ node, other = yield self.create_nodes(2)
+ yield node.send_identity(other)
# use NODE to fetch the identities for OTHER
- other.give_message(node.create_missing_identity(other.my_member, 10), node)
+ created_missing_identity = yield node.create_missing_identity(other.my_member, 10)
+ yield other.give_message(created_missing_identity, node)
# MISSING should reply with a dispersy-identity message
- responses = node.receive_messages()
+ responses = yield node.receive_messages()
self.assertEqual(len(responses), 1)
for _, response in responses:
self.assertEqual(response.name, u"dispersy-identity")
self.assertEqual(response.authentication.member.public_key, other.my_member.public_key)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_outgoing_missing_identity(self):
"""
NODE generates data and sends it to OTHER, resulting in OTHER asking for the other identity.
"""
- node, other = self.create_nodes(2)
+ node, other = yield self.create_nodes(2)
# Give OTHER a message from NODE
- message = node.create_full_sync_text("Hello World", 10)
- other.give_message(message, node)
+ message = yield node.create_full_sync_text("Hello World", 10)
+ yield other.give_message(message, node)
# OTHER must not yet process the 'Hello World' message, as it hasnt received the identity message yet
- other.assert_not_stored(message)
+ yield other.assert_not_stored(message)
# OTHER must send a missing-identity to NODEs
- responses = node.receive_messages()
+ responses = yield node.receive_messages()
self.assertEqual(len(responses), 1)
for _, response in responses:
self.assertEqual(response.name, u"dispersy-missing-identity")
self.assertEqual(response.payload.mid, node.my_member.mid)
# NODE sends the identity to OTHER
- node.send_identity(other)
+ yield node.send_identity(other)
# OTHER must now process and store the 'Hello World' message
- other.assert_is_stored(message)
+ yield other.assert_is_stored(message)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_outgoing_missing_identity_twice(self):
"""
NODE generates data and sends it to OTHER twice, resulting in OTHER asking for the other identity once.
"""
- node, other = self.create_nodes(2)
+ node, other = yield self.create_nodes(2)
# Give OTHER a message from NODE
- message = node.create_full_sync_text("Hello World", 10)
- other.give_message(message, node)
+ message = yield node.create_full_sync_text("Hello World", 10)
+ yield other.give_message(message, node)
# OTHER must not yet process the 'Hello World' message, as it hasnt received the identity message yet
- other.assert_not_stored(message)
+ yield other.assert_not_stored(message)
# Give OTHER the message once again
- other.give_message(message, node)
+ yield other.give_message(message, node)
# OTHER must send a single missing-identity to NODE
- responses = node.receive_messages()
+ responses = yield node.receive_messages()
self.assertEqual(len(responses), 1)
for _, response in responses:
self.assertEqual(response.name, u"dispersy-missing-identity")
self.assertEqual(response.payload.mid, node.my_member.mid)
# NODE sends the identity to OTHER
- node.send_identity(other)
+ yield node.send_identity(other)
# OTHER must now process and store the 'Hello World' message
- other.assert_is_stored(message)
+ yield other.assert_is_stored(message)
diff --git a/tests/test_missingmessage.py b/tests/test_missingmessage.py
index 934c8a8e..738cf28f 100644
--- a/tests/test_missingmessage.py
+++ b/tests/test_missingmessage.py
@@ -1,47 +1,69 @@
from random import shuffle
+from nose.twistedtools import deferred
+from twisted.internet.defer import inlineCallbacks
+
from .dispersytestclass import DispersyTestFunc
class TestMissingMessage(DispersyTestFunc):
+ def setUp(self):
+ super(TestMissingMessage, self).setUp()
+ self.nodes = []
+
+ @inlineCallbacks
def _test_with_order(self, batchFUNC):
"""
NODE generates a few messages and OTHER requests them one at a time.
"""
- node, other = self.create_nodes(2)
- node.send_identity(other)
+ node, other = yield self.create_nodes(2)
+
+ self.nodes.append(node)
+ self.nodes.append(other)
+ self.patch_send_packet_for_nodes()
+
+ yield node.send_identity(other)
# create messages
- messages = [node.create_full_sync_text("Message #%d" % i, i + 10) for i in xrange(10)]
- node.give_messages(messages, node)
+ messages = []
+ for i in xrange(10):
+ created_full_sync_text = yield node.create_full_sync_text("Message #%d" % i, i + 10)
+ messages.append(created_full_sync_text)
+
+ yield node.give_messages(messages, node)
batches = batchFUNC(messages)
for messages in batches:
global_times = sorted([message.distribution.global_time for message in messages])
# request messages
- node.give_message(other.create_missing_message(node.my_member, global_times), other)
+ missing_message = yield other.create_missing_message(node.my_member, global_times)
+ yield node.give_message(missing_message, other)
# receive response
- responses = [response for _, response in other.receive_messages(names=[message.name])]
+ messages = yield other.receive_messages(names=[message.name])
+ responses = [response for _, response in messages]
self.assertEqual(sorted(response.distribution.global_time for response in responses), global_times)
+ @deferred(timeout=10)
def test_single_request(self):
def batch(messages):
return [[message] for message in messages]
- self._test_with_order(batch)
+ return self._test_with_order(batch)
+ @deferred(timeout=10)
def test_single_request_out_of_order(self):
def batch(messages):
shuffle(messages)
return [[message] for message in messages]
- self._test_with_order(batch)
+ return self._test_with_order(batch)
+ @deferred(timeout=10)
def test_two_at_a_time(self):
def batch(messages):
batches = []
for i in range(0, len(messages), 2):
batches.append([messages[i], messages[i + 1]])
return batches
- self._test_with_order(batch)
+ return self._test_with_order(batch)
diff --git a/tests/test_nat_detection.py b/tests/test_nat_detection.py
index bcf73775..73821381 100644
--- a/tests/test_nat_detection.py
+++ b/tests/test_nat_detection.py
@@ -1,5 +1,8 @@
from time import time
+from nose.twistedtools import deferred
+from twisted.internet.defer import inlineCallbacks
+
from .dispersytestclass import DispersyTestFunc
from ..util import call_on_reactor_thread, address_is_lan_without_netifaces
@@ -109,6 +112,7 @@ def test_symmetric_vote(self):
class TestAddressEstimation(DispersyTestFunc):
+
def test_address_in_lan_function(self):
# Positive cases:
assert address_is_lan_without_netifaces("192.168.1.5")
@@ -123,6 +127,8 @@ def test_address_in_lan_function(self):
self.assertFalse(address_is_lan_without_netifaces("123.123.123.123"))
self.assertFalse(address_is_lan_without_netifaces("42.42.42.42"))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_estimate_addresses_within_LAN(self):
"""
Tests the estimate_lan_and_wan_addresses method while NODE and OTHER are within the same LAN.
@@ -131,25 +137,25 @@ def test_estimate_addresses_within_LAN(self):
correct LAN address. OTHER will not be able to determine the WAN address for NODE, hence
this should remain unchanged.
"""
- node, other = self.create_nodes(2)
- node.send_identity(other)
+ node, other = yield self.create_nodes(2)
+ yield node.send_identity(other)
incorrect_LAN = ("0.0.0.0", 0)
incorrect_WAN = ("0.0.0.0", 0)
# NODE contacts OTHER with incorrect addresses
- other.give_message(node.create_introduction_request(other.my_candidate,
+ created_introduction_request = yield node.create_introduction_request(other.my_candidate,
incorrect_LAN,
incorrect_WAN,
True,
u"unknown",
None,
42,
- 42),
- node)
+ 42)
+ yield other.give_message(created_introduction_request, node)
# NODE should receive an introduction-response with the corrected LAN address
- responses = node.receive_messages(names=[u"dispersy-introduction-response"])
+ responses = yield node.receive_messages(names=[u"dispersy-introduction-response"])
self.assertEqual(len(responses), 1)
for _, response in responses:
self.assertEqual(response.payload.destination_address, node.lan_address)
diff --git a/tests/test_neighborhood.py b/tests/test_neighborhood.py
index 0906fd68..15ccb03a 100644
--- a/tests/test_neighborhood.py
+++ b/tests/test_neighborhood.py
@@ -1,36 +1,51 @@
+from twisted.internet.task import deferLater
+
+from nose.twistedtools import deferred, reactor
+from twisted.internet.defer import inlineCallbacks
+
from .debugcommunity.community import DebugCommunity
from .dispersytestclass import DispersyTestFunc
class TestNeighborhood(DispersyTestFunc):
+ @deferred(timeout=10)
def test_forward_1(self):
return self.forward(1)
+ @deferred(timeout=10)
def test_forward_10(self):
return self.forward(10)
+ @deferred(timeout=10)
def test_forward_2(self):
return self.forward(2)
+ @deferred(timeout=10)
def test_forward_3(self):
return self.forward(3)
+ @deferred(timeout=15)
def test_forward_20(self):
return self.forward(20)
+ @deferred(timeout=10)
def test_forward_0_targeted_5(self):
return self.forward(0, 5)
+ @deferred(timeout=10)
def test_forward_0_targeted_20(self):
return self.forward(0, 20)
+ @deferred(timeout=10)
def test_forward_5_targeted_2(self):
return self.forward(5, 2)
+ @deferred(timeout=10)
def test_forward_2_targeted_5(self):
return self.forward(2, 5)
+ @inlineCallbacks
def forward(self, non_targeted_node_count, targeted_node_count=0):
"""
SELF should forward created messages at least to the specified targets.
@@ -59,17 +74,18 @@ def dispersy_yield_verified_candidates():
total_node_count = non_targeted_node_count + targeted_node_count
# provide CENTRAL with a neighborhood
- nodes = self.create_nodes(total_node_count)
+ nodes = yield self.create_nodes(total_node_count)
# SELF creates a message
candidates = tuple((node.my_candidate for node in nodes[:targeted_node_count]))
- message = self._mm.create_targeted_full_sync_text("Hello World!", destination=candidates, global_time=42)
- self._dispersy._forward([message])
+ message = yield self._mm.create_targeted_full_sync_text("Hello World!", destination=candidates, global_time=42)
+ yield self._dispersy._forward([message])
# check if sufficient NODES received the message (at least the first `target_count` ones)
forwarded_node_count = 0
for node in nodes:
- forwarded = [m for _, m in node.receive_messages(names=[u"full-sync-text"], timeout=0.1)]
+ messages = yield node.receive_messages(names=[u"full-sync-text"], timeout=0.1)
+ forwarded = [m for _, m in messages]
if node in nodes[:targeted_node_count]:
# They MUST have received the message
self.assertEqual(len(forwarded), 1)
diff --git a/tests/test_overlay.py b/tests/test_overlay.py
index 11ce5bc8..6f9239aa 100644
--- a/tests/test_overlay.py
+++ b/tests/test_overlay.py
@@ -1,21 +1,18 @@
+import logging
from os import environ
from pprint import pformat
-from time import time
+from time import time, sleep
from unittest import skipUnless
-import logging
-from nose.twistedtools import reactor
from twisted.internet.defer import inlineCallbacks
-from twisted.internet.task import deferLater
+from .debugcommunity.community import DebugCommunity
+from .debugcommunity.conversion import DebugCommunityConversion
+from .dispersytestclass import DispersyTestFunc
from ..conversion import DefaultConversion
from ..dispersy import Dispersy
from ..endpoint import StandaloneEndpoint
from ..util import blocking_call_on_reactor_thread
-from .debugcommunity.community import DebugCommunity
-from .debugcommunity.conversion import DebugCommunityConversion
-from .dispersytestclass import DispersyTestFunc
-
summary_logger = logging.getLogger("test-overlay-summary")
@@ -77,15 +74,18 @@ class Info(object):
cid = cid_hex.decode("HEX")
dispersy = Dispersy(StandaloneEndpoint(0), u".", u":memory:")
- dispersy.start(autoload_discovery=True)
+ yield dispersy.initialize_statistics()
+ yield dispersy.start(autoload_discovery=True)
dispersy.statistics.enable_debug_statistics(True)
self.dispersy_objects.append(dispersy)
- community = WCommunity.init_community(dispersy, dispersy.get_member(mid=cid), dispersy.get_new_member())
+ dispersy_member = yield dispersy.get_member(mid=cid)
+ new_dispersy_member = yield dispersy.get_new_member()
+ community = yield WCommunity.init_community(dispersy, dispersy_member, new_dispersy_member)
summary_logger.info(community.cid.encode("HEX"))
history = []
begin = time()
for _ in xrange(60 * 15):
- yield deferLater(reactor, 1, lambda: None)
+ sleep(1)
now = time()
info = Info()
info.diff = now - begin
@@ -109,7 +109,7 @@ class Info(object):
len([_ for _, category in info.candidates if category == u"discovered"]),
len([_ for _, category in info.candidates if category is None]))
- dispersy.statistics.update()
+ yield dispersy.statistics.update()
summary_logger.debug("\n%s", pformat(dispersy.statistics.get_dict()))
# write graph statistics
diff --git a/tests/test_pruning.py b/tests/test_pruning.py
index bd6221cd..af2a796a 100644
--- a/tests/test_pruning.py
+++ b/tests/test_pruning.py
@@ -1,20 +1,42 @@
+from twisted.internet.task import deferLater
+
+from nose.twistedtools import deferred, reactor
+
from .dispersytestclass import DispersyTestFunc
+from twisted.internet.defer import inlineCallbacks, returnValue
+
class TestPruning(DispersyTestFunc):
+ def setUp(self):
+ super(TestPruning, self).setUp()
+ self.nodes = []
+
+ @inlineCallbacks
def _create_prune(self, node, globaltime_start, globaltime_end, store=True):
- messages = [node.create_full_sync_global_time_pruning_text("Hello World #%d" % i, i) for i in xrange(globaltime_start, globaltime_end + 1)]
+ messages = []
+ for i in xrange(globaltime_start, globaltime_end + 1):
+ created_full_sync_global_time_pruning_text = yield node.create_full_sync_global_time_pruning_text("Hello World #%d" % i, i)
+ messages.append(created_full_sync_global_time_pruning_text)
+
if store:
- node.store(messages)
- return messages
+ yield node.store(messages)
+ returnValue(messages)
+ @inlineCallbacks
def _create_normal(self, node, globaltime_start, globaltime_end, store=True):
- messages = [node.create_full_sync_text("Hello World #%d" % i, i) for i in xrange(globaltime_start, globaltime_end + 1)]
+ messages = []
+ for i in xrange(globaltime_start, globaltime_end + 1):
+ created_full_sync_text = yield node.create_full_sync_text("Hello World #%d" % i, i)
+ messages.append(created_full_sync_text)
+
if store:
- node.store(messages)
- return messages
+ yield node.store(messages)
+ returnValue(messages)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_local_creation_causes_pruning(self):
"""
NODE creates messages that should be properly pruned.
@@ -29,14 +51,14 @@ def test_local_creation_causes_pruning(self):
self.assertEqual(meta.distribution.pruning.inactive_threshold, 10, "check message configuration")
self.assertEqual(meta.distribution.pruning.prune_threshold, 20, "check message configuration")
- node, = self.create_nodes(1)
+ node, = yield self.create_nodes(1)
- messages = self._create_prune(node, 11, 20)
+ messages = yield self._create_prune(node, 11, 20)
self.assertTrue(all(message.distribution.pruning.is_active() for message in messages), "all messages should be active")
# create 10 pruning messages
inactive = messages
- messages = self._create_prune(node, 21, 30)
+ messages = yield self._create_prune(node, 21, 30)
self.assertTrue(all(message.distribution.pruning.is_inactive() for message in inactive), "all messages should be inactive")
self.assertTrue(all(message.distribution.pruning.is_active() for message in messages), "all messages should be active")
@@ -44,15 +66,17 @@ def test_local_creation_causes_pruning(self):
# create 10 pruning messages
pruned = inactive
inactive = messages
- messages = self._create_prune(node, 31, 40)
+ messages = yield self._create_prune(node, 31, 40)
self.assertTrue(all(message.distribution.pruning.is_pruned() for message in pruned), "all messages should be pruned")
self.assertTrue(all(message.distribution.pruning.is_inactive() for message in inactive), "all messages should be inactive")
self.assertTrue(all(message.distribution.pruning.is_active() for message in messages), "all messages should be active")
# pruned messages should no longer exist in the database
- node.assert_not_stored(messages=pruned)
+ yield node.assert_not_stored(messages=pruned)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_local_creation_of_other_messages_causes_pruning(self):
"""
NODE creates messages that should be properly pruned.
@@ -66,23 +90,25 @@ def test_local_creation_of_other_messages_causes_pruning(self):
self.assertEqual(meta.distribution.pruning.inactive_threshold, 10, "check message configuration")
self.assertEqual(meta.distribution.pruning.prune_threshold, 20, "check message configuration")
- node, = self.create_nodes(1)
+ node, = yield self.create_nodes(1)
# create 10 pruning messages
- messages = self._create_prune(node, 11, 20)
+ messages = yield self._create_prune(node, 11, 20)
self.assertTrue(all(message.distribution.pruning.is_active() for message in messages), "all messages should be active")
# create 10 normal messages
- self._create_normal(node, 21, 30)
+ yield self._create_normal(node, 21, 30)
self.assertTrue(all(message.distribution.pruning.is_inactive() for message in messages), "all messages should be inactive")
# create 10 normal messages
- self._create_normal(node, 31, 40)
+ yield self._create_normal(node, 31, 40)
self.assertTrue(all(message.distribution.pruning.is_pruned() for message in messages), "all messages should be pruned")
# pruned messages should no longer exist in the database
- node.assert_not_stored(messages=messages)
+ yield node.assert_not_stored(messages=messages)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_remote_creation_causes_pruning(self):
"""
NODE creates messages that should cause pruning on OTHER
@@ -96,29 +122,31 @@ def test_remote_creation_causes_pruning(self):
self.assertEqual(meta.distribution.pruning.inactive_threshold, 10, "check message configuration")
self.assertEqual(meta.distribution.pruning.prune_threshold, 20, "check message configuration")
- node, other = self.create_nodes(2)
+ node, other = yield self.create_nodes(2)
# create 10 pruning messages
- other.give_messages(self._create_prune(node, 11, 20, store=False), node)
+ prune1 = yield self._create_prune(node, 11, 20, store=False)
+ yield other.give_messages(prune1, node)
# we need to let other fetch the messages
- messages = other.fetch_messages([u"full-sync-global-time-pruning-text", ])
+ messages = yield other.fetch_messages([u"full-sync-global-time-pruning-text", ])
self.assertTrue(all(message.distribution.pruning.is_active() for message in messages), "all messages should be active")
# create 10 pruning messages
- other.give_messages(self._create_prune(node, 21, 30, store=False), node)
+ prune2 = yield self._create_prune(node, 21, 30, store=False)
+ yield other.give_messages(prune2, node)
- messages = other.fetch_messages([u"full-sync-global-time-pruning-text", ])
+ messages = yield other.fetch_messages([u"full-sync-global-time-pruning-text", ])
should_be_inactive = [message for message in messages if message.distribution.global_time <= 20]
should_be_active = [message for message in messages if 20 < message.distribution.global_time <= 30]
self.assertTrue(all(message.distribution.pruning.is_inactive() for message in should_be_inactive), "all messages should be inactive")
self.assertTrue(all(message.distribution.pruning.is_active() for message in should_be_active), "all messages should be active")
# create 10 pruning messages
- messages = self._create_prune(node, 31, 40, store=False)
- other.give_messages(messages, node)
+ messages = yield self._create_prune(node, 31, 40, store=False)
+ yield other.give_messages(messages, node)
- messages = other.fetch_messages([u"full-sync-global-time-pruning-text", ])
+ messages = yield other.fetch_messages([u"full-sync-global-time-pruning-text", ])
should_be_pruned = [message for message in messages if message.distribution.global_time <= 20]
should_be_inactive = [message for message in messages if 20 < message.distribution.global_time <= 30]
should_be_active = [message for message in messages if 30 < message.distribution.global_time <= 40]
@@ -127,8 +155,10 @@ def test_remote_creation_causes_pruning(self):
self.assertTrue(all(message.distribution.pruning.is_active() for message in should_be_active), "all messages should be active")
# pruned messages should no longer exist in the database
- other.assert_not_stored(messages=should_be_pruned)
+ yield other.assert_not_stored(messages=should_be_pruned)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_remote_creation_of_other_messages_causes_pruning(self):
"""
NODE creates messages that should cause pruning on OTHER
@@ -142,30 +172,34 @@ def test_remote_creation_of_other_messages_causes_pruning(self):
self.assertEqual(meta.distribution.pruning.inactive_threshold, 10, "check message configuration")
self.assertEqual(meta.distribution.pruning.prune_threshold, 20, "check message configuration")
- node, other = self.create_nodes(2)
+ node, other = yield self.create_nodes(2)
# create 10 pruning messages
- messages = self._create_prune(node, 11, 20, store=False)
- other.give_messages(messages, node)
+ messages = yield self._create_prune(node, 11, 20, store=False)
+ yield other.give_messages(messages, node)
- messages = other.fetch_messages([u"full-sync-global-time-pruning-text", ])
+ messages = yield other.fetch_messages([u"full-sync-global-time-pruning-text", ])
self.assertTrue(all(message.distribution.pruning.is_active() for message in messages), "all messages should be active")
# create 10 normal messages
- other.give_messages(self._create_normal(node, 21, 30, store=False), node)
+ normal1 = yield self._create_normal(node, 21, 30, store=False)
+ yield other.give_messages(normal1, node)
- messages = other.fetch_messages([u"full-sync-global-time-pruning-text", ])
+ messages = yield other.fetch_messages([u"full-sync-global-time-pruning-text", ])
self.assertTrue(all(message.distribution.pruning.is_inactive() for message in messages), "all messages should be inactive")
# create 10 normal messages
- other.give_messages(self._create_normal(node, 31, 40, store=False), node)
+ normal2 = yield self._create_normal(node, 31, 40, store=False)
+ yield other.give_messages(normal2, node)
- messages = other.fetch_messages([u"full-sync-global-time-pruning-text", ])
+ messages = yield other.fetch_messages([u"full-sync-global-time-pruning-text", ])
self.assertTrue(all(message.distribution.pruning.is_pruned() for message in messages), "all messages should be pruned")
# pruned messages should no longer exist in the database
- other.assert_not_stored(messages=messages)
+ yield other.assert_not_stored(messages=messages)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_sync_response_response_filtering_inactive(self):
"""
Testing the bloom filter sync.
@@ -182,26 +216,31 @@ def test_sync_response_response_filtering_inactive(self):
self.assertEqual(meta.distribution.pruning.inactive_threshold, 10, "check message configuration")
self.assertEqual(meta.distribution.pruning.prune_threshold, 20, "check message configuration")
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ self.nodes.append(node)
+ self.nodes.append(other)
+ self.patch_send_packet_for_nodes()
+
+ yield other.send_identity(node)
# OTHER creates 20 messages
- messages = self._create_prune(other, 11, 30)
+ messages = yield self._create_prune(other, 11, 30)
self.assertTrue(all(message.distribution.pruning.is_inactive() for message in messages[0:10]), "all messages should be inactive")
self.assertTrue(all(message.distribution.pruning.is_active() for message in messages[10:20]), "all messages should be active")
# NODE requests missing messages
sync = (1, 0, 1, 0, [])
global_time = 1 # ensure we do not increase the global time, causing further pruning
- other.give_message(node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", sync, 42, global_time), node)
-
+ created_introduction_request = yield node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", sync, 42, global_time)
+ yield other.give_message(created_introduction_request, node)
# OTHER should return the 10 active messages
- responses = [response for _, response in node.receive_messages(names=[u"full-sync-global-time-pruning-text"])]
+ responses = yield node.receive_messages(names=[u"full-sync-global-time-pruning-text"])
+ responses = [response for _, response in responses]
self.assertEqual(len(responses), 10)
self.assertTrue(all(message.packet == response.packet for message, response in zip(messages[10:20], responses)))
# OTHER creates 5 normal messages
- self._create_normal(other, 31, 35)
+ yield self._create_normal(other, 31, 35)
self.assertTrue(all(message.distribution.pruning.is_pruned() for message in messages[0:5]), "all messages should be pruned")
self.assertTrue(all(message.distribution.pruning.is_inactive() for message in messages[5:15]), "all messages should be inactive")
self.assertTrue(all(message.distribution.pruning.is_active() for message in messages[15:20]), "all messages should be active")
@@ -209,9 +248,11 @@ def test_sync_response_response_filtering_inactive(self):
# NODE requests missing messages
sync = (1, 0, 1, 0, [])
global_time = 1 # ensure we do not increase the global time, causing further pruning
- other.give_message(node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", sync, 42, global_time), node)
+ created_introduction_request = yield node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", sync, 42, global_time)
+ yield other.give_message(created_introduction_request, node)
# OTHER should return the 5 active pruning messages
- responses = [response for _, response in node.receive_messages(names=[u"full-sync-global-time-pruning-text"])]
+ responses = yield node.receive_messages(names=[u"full-sync-global-time-pruning-text"])
+ responses = [response for _, response in responses]
self.assertEqual(len(responses), 5)
self.assertTrue(all(message.packet == response.packet for message, response in zip(messages[15:20], responses)))
diff --git a/tests/test_requestcache.py b/tests/test_requestcache.py
index b5b48739..0eb6ec6f 100644
--- a/tests/test_requestcache.py
+++ b/tests/test_requestcache.py
@@ -1,6 +1,7 @@
+from .dispersytestclass import DispersyTestFunc
+
from ..requestcache import RequestCache, NumberCache, RandomNumberCache
from ..util import blocking_call_on_reactor_thread
-from .dispersytestclass import DispersyTestFunc
class TestRequestCache(DispersyTestFunc):
diff --git a/tests/test_sequence.py b/tests/test_sequence.py
index e0b75e40..b09a7b40 100644
--- a/tests/test_sequence.py
+++ b/tests/test_sequence.py
@@ -1,10 +1,20 @@
from collections import defaultdict
+from nose.twistedtools import deferred
+from twisted.internet.defer import inlineCallbacks, returnValue
+
from .dispersytestclass import DispersyTestFunc
class TestIncomingMissingSequence(DispersyTestFunc):
+ def setUp(self):
+ super(TestIncomingMissingSequence, self).setUp()
+ self.nodes = []
+
+ @deferred(timeout=10)
+ @inlineCallbacks
+ # TODO(Laurens): This is not called?
def incoming_simple_conflict_different_global_time(self):
"""
A broken NODE creates conflicting messages with the same sequence number that OTHER should
@@ -16,60 +26,64 @@ def incoming_simple_conflict_different_global_time(self):
- etc...
"""
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
msgs = defaultdict(dict)
+ @inlineCallbacks
+ # TODO(Laurens): Add yields in front of the function calls of this function to handle the deferred
+ # once this method will be used again.
def get_message(global_time, seq):
if not global_time in msgs or not seq in msgs[global_time]:
- msgs[global_time][seq] = node.create_sequence_text("M@%d#%d" % (global_time, seq), global_time, seq)
- return msgs[global_time][seq]
+ msgs[global_time][seq] = yield node.create_sequence_text("M@%d#%d" % (global_time, seq), global_time, seq)
+ message = msgs[global_time][seq]
+ returnValue(message)
# NODE must accept M@6#1
- other.give_message(get_message(6, 1), node)
- other.assert_is_stored(get_message(6, 1))
+ yield other.give_message(get_message(6, 1), node)
+ yield other.assert_is_stored(get_message(6, 1))
# NODE must reject M@6#1 (already have this message)
- other.give_message(get_message(6, 1), node)
- other.assert_is_stored(get_message(6, 1))
+ yield other.give_message(get_message(6, 1), node)
+ yield other.assert_is_stored(get_message(6, 1))
# NODE must prefer M@5#1 (duplicate sequence number, prefer lower global time)
- other.give_message(get_message(5, 1), node)
- other.assert_is_stored(get_message(5, 1))
- other.assert_not_stored(get_message(6, 1))
+ yield other.give_message(get_message(5, 1), node)
+ yield other.assert_is_stored(get_message(5, 1))
+ yield other.assert_not_stored(get_message(6, 1))
# NODE must reject M@6#1 (duplicate sequence number, prefer lower global time)
- other.give_message(get_message(6, 1), node)
- other.assert_is_stored(get_message(5, 1))
- other.assert_not_stored(get_message(6, 1))
+ yield other.give_message(get_message(6, 1), node)
+ yield other.assert_is_stored(get_message(5, 1))
+ yield other.assert_not_stored(get_message(6, 1))
# NODE must reject M@4#2 (global time is lower than previous global time in sequence)
- other.give_message(get_message(4, 2), node)
- other.assert_is_stored(get_message(5, 1))
- other.assert_not_stored(get_message(4, 2))
+ yield other.give_message(get_message(4, 2), node)
+ yield other.assert_is_stored(get_message(5, 1))
+ yield other.assert_not_stored(get_message(4, 2))
# NODE must reject M@5#2 (duplicate global time)
- other.give_message(get_message(5, 2), node)
- other.assert_is_stored(get_message(5, 1))
- other.assert_not_stored(get_message(5, 2))
+ yield other.give_message(get_message(5, 2), node)
+ yield other.assert_is_stored(get_message(5, 1))
+ yield other.assert_not_stored(get_message(5, 2))
# NODE must accept M@7#2
- other.give_message(get_message(6, 2), node)
- other.assert_is_stored(get_message(5, 1))
- other.assert_is_stored(get_message(6, 2))
+ yield other.give_message(get_message(6, 2), node)
+ yield other.assert_is_stored(get_message(5, 1))
+ yield other.assert_is_stored(get_message(6, 2))
# NODE must accept M@8#3
- other.give_message(get_message(8, 3), node)
- other.assert_is_stored(get_message(5, 1))
- other.assert_is_stored(get_message(6, 2))
- other.assert_is_stored(get_message(8, 3))
+ yield other.give_message(get_message(8, 3), node)
+ yield other.assert_is_stored(get_message(5, 1))
+ yield other.assert_is_stored(get_message(6, 2))
+ yield other.assert_is_stored(get_message(8, 3))
# NODE must accept M@9#4
- other.give_message(get_message(9, 4), node)
- other.assert_is_stored(get_message(5, 1))
- other.assert_is_stored(get_message(6, 2))
- other.assert_is_stored(get_message(8, 3))
- other.assert_is_stored(get_message(9, 4))
+ yield other.give_message(get_message(9, 4), node)
+ yield other.assert_is_stored(get_message(5, 1))
+ yield other.assert_is_stored(get_message(6, 2))
+ yield other.assert_is_stored(get_message(8, 3))
+ yield other.assert_is_stored(get_message(9, 4))
# NODE must accept M@7#3
# It would be possible to keep M@9#4, but the way that the code is structures makes this
@@ -77,166 +91,250 @@ def get_message(global_time, seq):
# have to delete). In the future we can optimize by pushing the newer messages (such as
# M@7#3) into the waiting or incoming packet queue, this will allow them to be re-inserted
# after M@6#2 has been fully accepted.
- other.give_message(get_message(7, 3), node)
- other.assert_is_stored(get_message(5, 1))
- other.assert_is_stored(get_message(6, 2))
- other.assert_is_stored(get_message(7, 3))
+ yield other.give_message(get_message(7, 3), node)
+ yield other.assert_is_stored(get_message(5, 1))
+ yield other.assert_is_stored(get_message(6, 2))
+ yield other.assert_is_stored(get_message(7, 3))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_1(self):
- self.requests(1, [1], (1, 1))
+ yield self.requests(1, [1], (1, 1))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_2(self):
- self.requests(1, [10], (10, 10))
+ yield self.requests(1, [10], (10, 10))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_3(self):
- self.requests(1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], (1, 10))
+ yield self.requests(1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], (1, 10))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_4(self):
- self.requests(1, [3, 4, 5, 6, 7, 8, 9, 10], (3, 10))
+ yield self.requests(1, [3, 4, 5, 6, 7, 8, 9, 10], (3, 10))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_5(self):
- self.requests(1, [1, 2, 3, 4, 5, 6, 7], (1, 7))
+ yield self.requests(1, [1, 2, 3, 4, 5, 6, 7], (1, 7))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_6(self):
- self.requests(1, [3, 4, 5, 6, 7], (3, 7))
+ yield self.requests(1, [3, 4, 5, 6, 7], (3, 7))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_1(self):
- self.requests(2, [1], (1, 1))
+ yield self.requests(2, [1], (1, 1))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_2(self):
- self.requests(2, [10], (10, 10))
+ yield self.requests(2, [10], (10, 10))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_3(self):
- self.requests(2, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], (1, 10))
+ yield self.requests(2, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], (1, 10))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_4(self):
- self.requests(2, [3, 4, 5, 6, 7, 8, 9, 10], (3, 10))
+ yield self.requests(2, [3, 4, 5, 6, 7, 8, 9, 10], (3, 10))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_5(self):
- self.requests(2, [1, 2, 3, 4, 5, 6, 7], (1, 7))
+ yield self.requests(2, [1, 2, 3, 4, 5, 6, 7], (1, 7))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_6(self):
- self.requests(2, [3, 4, 5, 6, 7], (3, 7))
+ yield self.requests(2, [3, 4, 5, 6, 7], (3, 7))
# multi-range requests
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_7(self):
- self.requests(1, [1], (1, 1), (1, 1), (1, 1))
+ yield self.requests(1, [1], (1, 1), (1, 1), (1, 1))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_9(self):
- self.requests(1, [1, 2, 3, 4, 5], (1, 2), (2, 3), (3, 4), (4, 5))
+ yield self.requests(1, [1, 2, 3, 4, 5], (1, 2), (2, 3), (3, 4), (4, 5))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_11(self):
- self.requests(1, [1, 2, 4, 5, 7, 8], (1, 2), (4, 5), (7, 8))
+ yield self.requests(1, [1, 2, 4, 5, 7, 8], (1, 2), (4, 5), (7, 8))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_7(self):
- self.requests(2, [1], (1, 1), (1, 1), (1, 1))
+ yield self.requests(2, [1], (1, 1), (1, 1), (1, 1))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_9(self):
- self.requests(2, [1, 2, 3, 4, 5], (1, 2), (2, 3), (3, 4), (4, 5))
+ yield self.requests(2, [1, 2, 3, 4, 5], (1, 2), (2, 3), (3, 4), (4, 5))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_11(self):
- self.requests(2, [1, 2, 4, 5, 7, 8], (1, 2), (4, 5), (7, 8))
+ yield self.requests(2, [1, 2, 4, 5, 7, 8], (1, 2), (4, 5), (7, 8))
# multi-range requests, in different orders
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_13(self):
- self.requests(1, [1], (1, 1), (1, 1), (1, 1))
+ yield self.requests(1, [1], (1, 1), (1, 1), (1, 1))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_15(self):
- self.requests(1, [1, 2, 3, 4, 5], (4, 5), (3, 4), (1, 2), (2, 3))
+ yield self.requests(1, [1, 2, 3, 4, 5], (4, 5), (3, 4), (1, 2), (2, 3))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_16(self):
- self.requests(1, [1, 5], (5, 5), (1, 1))
+ yield self.requests(1, [1, 5], (5, 5), (1, 1))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_17(self):
- self.requests(1, [1, 2, 4, 5, 7, 8], (1, 2), (7, 8), (4, 5))
+ yield self.requests(1, [1, 2, 4, 5, 7, 8], (1, 2), (7, 8), (4, 5))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_13(self):
- self.requests(2, [1], (1, 1), (1, 1), (1, 1))
+ yield self.requests(2, [1], (1, 1), (1, 1), (1, 1))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_15(self):
- self.requests(2, [1, 2, 3, 4, 5], (4, 5), (3, 4), (1, 2), (2, 3))
+ yield self.requests(2, [1, 2, 3, 4, 5], (4, 5), (3, 4), (1, 2), (2, 3))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_16(self):
- self.requests(2, [1, 5], (5, 5), (1, 1))
+ yield self.requests(2, [1, 5], (5, 5), (1, 1))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_17(self):
- self.requests(2, [1, 2, 4, 5, 7, 8], (1, 2), (7, 8), (4, 5))
+ yield self.requests(2, [1, 2, 4, 5, 7, 8], (1, 2), (7, 8), (4, 5))
# single range requests, invalid requests
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_19(self):
- self.requests(1, [10], (10, 11))
+ yield self.requests(1, [10], (10, 11))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_20(self):
- self.requests(1, [], (11, 11))
+ yield self.requests(1, [], (11, 11))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_19(self):
- self.requests(2, [10], (10, 11))
+ yield self.requests(2, [10], (10, 11))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_20(self):
- self.requests(2, [], (11, 11))
+ yield self.requests(2, [], (11, 11))
# multi-range requests, invalid requests
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_23(self):
- self.requests(1, [10], (10, 11), (10, 100), (50, 75))
+ yield self.requests(1, [10], (10, 11), (10, 100), (50, 75))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_1_24(self):
- self.requests(1, [], (11, 11), (11, 50), (100, 200))
+ yield self.requests(1, [], (11, 11), (11, 50), (100, 200))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_23(self):
- self.requests(2, [10], (10, 11), (10, 100), (50, 75))
+ yield self.requests(2, [10], (10, 11), (10, 100), (50, 75))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_requests_2_24(self):
- self.requests(2, [], (11, 11), (11, 50), (100, 200))
+ yield self.requests(2, [], (11, 11), (11, 50), (100, 200))
+ @inlineCallbacks
def requests(self, node_count, expected_responses, *pairs):
"""
NODE1 through NODE requests OTHER (non)overlapping sequences, OTHER should send back the requested messages
only once.
"""
- other, = self.create_nodes(1)
- nodes = self.create_nodes(node_count)
+ other, = yield self.create_nodes(1)
+ nodes = yield self.create_nodes(node_count)
+ self.nodes.append(other)
+ self.nodes.extend(nodes)
+
+ self.patch_send_packet_for_nodes()
+
for node in nodes:
other.send_identity(node)
- messages = [other.create_sequence_text("Sequence message #%d" % i, i + 10, i) for i in range(1, 11)]
- other.store(messages)
+ messages = []
+ for i in xrange(1, 11):
+ created_sequence_text = yield other.create_sequence_text("Sequence message #%d" % i, i + 10, i)
+ messages.append(created_sequence_text)
+
+ yield other.store(messages)
# request missing
# first, create all messages
rmessages = defaultdict(list)
for low, high in pairs:
for node in nodes:
- rmessages[node].append(node.create_missing_sequence(other.my_member, messages[0].meta, low, high))
+ missing_sequence = yield node.create_missing_sequence(other.my_member, messages[0].meta, low, high)
+ rmessages[node].append(missing_sequence)
# then, send them to other
for node in nodes:
for message in rmessages[node]:
- other.give_message(message, node, cache=True)
+ yield other.give_message(message, node, cache=True)
# receive response
for node in nodes:
- responses = [response.distribution.sequence_number for _, response in node.receive_messages(names=[u"sequence-text"], timeout=0.1)]
+ messages = yield node.receive_messages(names=[u"sequence-text"], timeout=0.1)
+ responses = [response.distribution.sequence_number for _, response in messages]
self.assertEqual(len(responses), len(expected_responses))
for seq, expected_seq in zip(responses, expected_responses):
self.assertEqual(seq, expected_seq)
+
class TestOutgoingMissingSequence(DispersyTestFunc):
+ @deferred(timeout=15)
+ @inlineCallbacks
def test_missing(self):
"""
NODE sends message while OTHER doesn't have the prior sequence numbers, OTHER should request these messages.
"""
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
- messages = [node.create_sequence_text("Sequence message #%d" % sequence, sequence + 10, sequence)
- for sequence
- in range(1, 11)]
+ messages = []
+ for sequence in range(1, 11):
+ created_sequence_text = yield node.create_sequence_text("Sequence message #%d" % sequence, sequence + 10, sequence)
+ messages.append(created_sequence_text)
# NODE gives #5, hence OTHER will request [#1:#4]
- other.give_message(messages[4], node)
- requests = node.receive_messages(names=[u"dispersy-missing-sequence"])
+ yield other.give_message(messages[4], node)
+ requests = yield node.receive_messages(names=[u"dispersy-missing-sequence"])
self.assertEqual(len(requests), 1)
_, request = requests[0]
@@ -246,14 +344,14 @@ def test_missing(self):
self.assertEqual(request.payload.missing_high, 4)
# NODE gives the missing packets, database should now contain [#1:#5]
- other.give_messages(messages[0:4], node)
+ yield other.give_messages(messages[0:4], node)
for message in messages[0:5]:
- other.assert_is_stored(message)
+ yield other.assert_is_stored(message)
# NODE gives #10, hence OTHER will request [#6:#9]
- other.give_message(messages[9], node)
- requests = node.receive_messages(names=[u"dispersy-missing-sequence"])
+ yield other.give_message(messages[9], node)
+ requests = yield node.receive_messages(names=[u"dispersy-missing-sequence"])
self.assertEqual(len(requests), 1)
_, request = requests[0]
@@ -263,7 +361,7 @@ def test_missing(self):
self.assertEqual(request.payload.missing_high, 9)
# NODE gives the missing packets, database should now contain [#1:#10]
- other.give_messages(messages[5:9], node)
+ yield other.give_messages(messages[5:9], node)
for message in messages:
- other.assert_is_stored(message)
+ yield other.assert_is_stored(message)
diff --git a/tests/test_signature.py b/tests/test_signature.py
index 129322dc..ad722f4e 100644
--- a/tests/test_signature.py
+++ b/tests/test_signature.py
@@ -1,20 +1,24 @@
-from time import sleep
+from nose.twistedtools import deferred, reactor
+from twisted.internet.defer import inlineCallbacks
+from twisted.internet.task import deferLater
from .dispersytestclass import DispersyTestFunc
class TestSignature(DispersyTestFunc):
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_invalid_public_key(self):
"""
NODE sends a message containing an invalid public-key to OTHER.
OTHER should drop it
"""
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
- message = node.create_bin_key_text('Should drop')
- packet = node.encode_message(message)
+ message = yield node.create_bin_key_text('Should drop')
+ packet = yield node.encode_message(message)
# replace the valid public-key with an invalid one
public_key = node.my_member.public_key
@@ -24,26 +28,30 @@ def test_invalid_public_key(self):
self.assertNotEqual(packet, invalid_packet)
# give invalid message to OTHER
- other.give_packet(invalid_packet, node)
+ yield other.give_packet(invalid_packet, node)
- self.assertEqual(other.fetch_messages([u"bin-key-text", ]), [])
+ other_messages = yield other.fetch_messages([u"bin-key-text", ])
+ self.assertEqual(other_messages, [])
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_invalid_signature(self):
"""
NODE sends a message containing an invalid signature to OTHER.
OTHER should drop it
"""
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
- message = node.create_full_sync_text('Should drop')
- packet = node.encode_message(message)
+ message = yield node.create_full_sync_text('Should drop')
+ packet = yield node.encode_message(message)
# replace the valid signature with an invalid one
invalid_packet = packet[:-node.my_member.signature_length] + 'I' * node.my_member.signature_length
self.assertNotEqual(packet, invalid_packet)
# give invalid message to OTHER
- other.give_packet(invalid_packet, node)
+ yield other.give_packet(invalid_packet, node)
- self.assertEqual(other.fetch_messages([u"full-sync-text", ]), [])
+ other_messages = yield other.fetch_messages([u"full-sync-text", ])
+ self.assertEqual(other_messages, [])
diff --git a/tests/test_storm_db_manager.py b/tests/test_storm_db_manager.py
new file mode 100644
index 00000000..44c9137b
--- /dev/null
+++ b/tests/test_storm_db_manager.py
@@ -0,0 +1,328 @@
+import os
+
+
+from twisted.internet.defer import DeferredList, inlineCallbacks
+from unittest import TestCase
+
+from nose.twistedtools import deferred, reactor
+
+from ..util import blockingCallFromThread
+from ..StormDBManager import StormDBManager
+
+
+class TestStormDBManager(TestCase):
+ FILE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+ TEST_DATA_DIR = os.path.abspath(os.path.join(FILE_DIR, u"data"))
+ SQLITE_TEST_DB = os.path.abspath(os.path.join(TEST_DATA_DIR, u"test.db"))
+
+ def setUp(self):
+ super(TestStormDBManager, self).setUp()
+
+ # Do not use an in-memory database. Different connections to the same
+ # in-memory database do not point towards the same database.
+ # http://stackoverflow.com/questions/3315046/sharing-a-memory-database-between-different-threads-in-python-using-sqlite3-pa
+ if not os.path.exists(self.TEST_DATA_DIR):
+ os.mkdir(self.TEST_DATA_DIR)
+ self.storm_db = StormDBManager("sqlite:%s" % self.SQLITE_TEST_DB)
+ blockingCallFromThread(reactor, self.storm_db.initialize)
+
+ def tearDown(self):
+ super(TestStormDBManager, self).tearDown()
+ # Delete the database file if not using an in-memory database.
+ if os.path.exists(self.SQLITE_TEST_DB):
+ os.unlink(self.SQLITE_TEST_DB)
+
+ def create_car_database(self):
+ """
+ Creates a table with the name "car".
+ Contains one column named "brand".
+ :return: A deferred that fires once the table has been made.
+ """
+ sql = u"CREATE TABLE car(brand);"
+ return self.storm_db.execute(sql)
+
+ def create_myinfo_table(self):
+ """
+ Creates a table with the name "MyInfo".
+ Contains two columns: one with "entry" and one named "value".
+ :return: A deferred that fires once the table has been made.
+ """
+ sql = u"""
+ CREATE TABLE MyInfo (
+ entry PRIMARY KEY,
+ value text
+ );
+ """
+ return self.storm_db.execute(sql)
+
+ @deferred(timeout=5)
+ def test_execute_function(self):
+ """
+ Checks if the execute function returns None when not providing the get_lastrowid argument.
+ """
+ def check_return_is_none(result):
+ self.assertIsNone(result)
+
+ result_deferred = self.create_car_database()
+ result_deferred.addCallback(check_return_is_none)
+ return result_deferred
+
+ @deferred(timeout=5)
+ def test_insert_and_fetchone(self):
+ """
+ This test tests the insert functionality and the fetch_one function.
+ """
+
+ def assert_result(result):
+ self.assertIsInstance(result, tuple, "Result was not a tuple!")
+ self.assertEquals(result[0], "BMW", "Result did not contain BMW as expected!")
+
+ def fetch_inserted(_):
+ sql = u"SELECT * FROM car"
+ return self.storm_db.fetchone(sql)
+
+ def insert_into_db(_):
+ return self.storm_db.insert( "car", brand="BMW")
+
+ result_deferred = self.create_car_database() # Create the car table
+ result_deferred.addCallback(insert_into_db) # Insert one value
+ result_deferred.addCallback(fetch_inserted) # Fetch the value
+ result_deferred.addCallback(assert_result) # Assert the result
+
+ return result_deferred
+
+ @deferred(timeout=5)
+ def test_insert_and_fetchall(self):
+ """
+ This test tests the insert_many functionality and the fetch_all functionality.
+ """
+
+ def assert_result(result):
+ self.assertIsInstance(result, list, "Result was not a list!")
+ self.assertEquals(result[0][0], "BMW", "First result did not contain BMW as expected!")
+ self.assertEquals(result[1][0], "Volvo", "Seconds result did not contain Volvo as expected!")
+
+ def fetch_inserted(_):
+ sql = u"SELECT * FROM car"
+ return self.storm_db.fetchall(sql)
+
+ def insert_into_db(_):
+ insert_values = []
+ insert_values.append({"brand": "BMW"})
+ insert_values.append({"brand": "Volvo"})
+ return self.storm_db.insert_many( "car", insert_values)
+
+ result_deferred = self.create_car_database() # Create the car table
+ result_deferred.addCallback(insert_into_db) # Insert two value
+ result_deferred.addCallback(fetch_inserted) # Fetch all values
+ result_deferred.addCallback(assert_result) # Assert the results
+
+ return result_deferred
+
+ @deferred(timeout=5)
+ def test_remove_single_element(self):
+ """
+ This test tests the delete function by using a single element as value.
+ """
+
+ def assert_result(result):
+ self.assertIsInstance(result, list, "Result was not a list!")
+ self.assertEquals(result[0][0], "Volvo", "First result was not Volvo as expected!")
+
+ def fetch_inserted(_):
+ sql = u"SELECT * FROM car"
+ return self.storm_db.fetchall(sql)
+
+ def delete_one(_):
+ return self.storm_db.delete( "car", brand="BMW")
+
+ def insert_into_db(_):
+ insert_values = []
+ insert_values.append({"brand": "BMW"})
+ insert_values.append({"brand": "Volvo"})
+ return self.storm_db.insert_many("car", insert_values)
+
+ result_deferred = self.create_car_database() # Create the car table
+ result_deferred.addCallback(insert_into_db) # Insert two value
+ result_deferred.addCallback(delete_one) # Delete one value by using a single element
+ result_deferred.addCallback(fetch_inserted) # Fetch all values
+ result_deferred.addCallback(assert_result) # Assert the results
+
+ return result_deferred
+
+ @deferred(timeout=5)
+ def test_remove_tuple(self):
+ """
+ This test tests the delete function by using a tuple as value.
+ """
+
+ def assert_result(result):
+ self.assertIsInstance(result, list, "Result was not a list!")
+ self.assertEquals(result[0][0], "Volvo", "First result was not Volvo as expected!")
+
+ def fetch_inserted(_):
+ sql = u"SELECT * FROM car"
+ return self.storm_db.fetchall(sql)
+
+ def delete_one(_):
+ return self.storm_db.delete("car", brand=("LIKE", "BMW"))
+
+ def insert_into_db(_):
+ insert_values = []
+ insert_values.append({"brand": "BMW"})
+ insert_values.append({"brand": "Volvo"})
+ return self.storm_db.insert_many("car", insert_values)
+
+ result_deferred = self.create_car_database() # Create the car table
+ result_deferred.addCallback(insert_into_db) # Insert two value
+ result_deferred.addCallback(delete_one) # Delete one value by using a tuple containing an operator
+ result_deferred.addCallback(fetch_inserted) # Fetch all values
+ result_deferred.addCallback(assert_result) # Assert the results
+
+ return result_deferred
+
+ @deferred(timeout=5)
+ def test_size(self):
+ """
+ This test tests the size function.
+ """
+
+ def assert_result(result):
+ self.assertIsInstance(result, tuple, "Result was not a tuple!")
+ self.assertEquals(result[0], 2, "Result was not 2")
+
+ def get_size(_):
+ return self.storm_db.count("car")
+
+ def insert_into_db(_):
+ list = []
+ list.append({"brand": "BMW"})
+ list.append({"brand": "Volvo"})
+ return self.storm_db.insert_many("car", list)
+
+ result_deferred = self.create_car_database() # Create the car table
+ result_deferred.addCallback(insert_into_db) # Insert two value
+ result_deferred.addCallback(get_size) # Get the size
+ result_deferred.addCallback(assert_result) # Assert the result
+
+ return result_deferred
+
+ @deferred(timeout=5)
+ def test_version_no_table(self):
+ """
+ This test tests whether the version is 0 if an sql error occurs.
+ In this case the table MyInfo does not exist.
+ """
+
+ def assert_result(_):
+ self.assertIsInstance(self.storm_db._version, int, "_version field is not an int!")
+ self.assertEqual(self.storm_db._version, 0, "Version was not 0 but: %r" % self.storm_db._version)
+
+ def get_size(_):
+ return self.storm_db.count("car")
+
+ result_deferred = self.create_car_database() # Create the car table
+ result_deferred.addCallback(get_size) # Get the version
+ result_deferred.addCallback(assert_result) # Assert the version
+
+ return result_deferred
+
+ @deferred(timeout=5)
+ def test_version_myinfo_table(self):
+ """
+ This test tests whether the version is 2 if the MyInfo table exists.
+ """
+
+ def assert_result(_):
+ self.assertIsInstance(self.storm_db._version, int, "_version field is not an int!")
+ self.assertEqual(self.storm_db._version, 2, "Version was not 2 but: %r" % self.storm_db._version)
+
+ def get_version(_):
+ return self.storm_db._retrieve_version()
+
+ def insert_version(_):
+ return self.storm_db.insert("MyInfo", entry="version", value="2")
+
+ result_deferred = self.create_myinfo_table() # Create the database
+ result_deferred.addCallback(insert_version) # Get the version
+ result_deferred.addCallback(get_version) # Let the manager retrieve the version (again).
+ result_deferred.addCallback(assert_result) # Assert the version
+
+ return result_deferred
+
+ @deferred(timeout=5)
+ def test_synchronous_insert_with_lock(self):
+ """
+ This test tests that if you schedule three calls simultaneously, that
+ by the mechanism of the lock they still are executed synchronously.
+ """
+
+ def assert_sequence(result):
+ self.assertIsInstance(result, list, "Result was not of type list but: %r" % result)
+ self.assertEqual(len(result), 3, "Result list didn't contain 3 tuples but: %r" % len(result))
+ self.assertEqual(result[0][1], 1)
+ self.assertEqual(result[1][1], 2)
+ self.assertEqual(result[2][1], 3)
+
+ def fetch_all(_):
+ sql = u"SELECT * FROM numtest"
+ return self.storm_db.fetchall(sql)
+
+ defer_list = []
+
+ def schedule_tree_inserts(_):
+ for i in xrange(1, 4):
+ defer_list.append(self.storm_db.insert( "numtest", num=i))
+
+ return DeferredList(defer_list)
+
+ def create_numtest_db():
+ sql = u"""
+ CREATE TABLE numtest (
+ id INTEGER PRIMARY KEY,
+ num INTEGER
+ );
+ """
+ return self.storm_db.execute(sql)
+
+ result_deferred = create_numtest_db()
+ result_deferred.addCallback(schedule_tree_inserts)
+ result_deferred.addCallback(fetch_all)
+ result_deferred.addCallback(assert_sequence)
+
+ return result_deferred
+
+ @deferred(timeout=5)
+ @inlineCallbacks
+ def test_insert(self):
+ """
+ Tests if you get the last inserted row id from execute if get_lastrowid is set to True.
+ """
+ sql = u"""
+ CREATE TABLE numtest (
+ id INTEGER PRIMARY KEY,
+ num INTEGER
+ );
+ """
+ yield self.storm_db.execute(sql)
+ id = yield self.storm_db.execute(u"INSERT INTO numtest (num) VALUES(?)", (1,), get_lastrowid=True)
+ self.assertEqual(id, 1)
+
+ @deferred(timeout=5)
+ @inlineCallbacks
+ def test_executemany(self):
+ """
+ Tests if execute many works sequentially executes the queries as expected.
+ """
+ sql = u"""
+ CREATE TABLE numtest (
+ id INTEGER PRIMARY KEY,
+ num INTEGER
+ );
+ """
+ yield self.storm_db.execute(sql)
+ query_arguments = [(1,), (2,), (3,)]
+ sql = u"INSERT INTO numtest (num) VALUES(?)"
+ yield self.storm_db.executemany(sql, query_arguments)
+ count, = yield self.storm_db.count("numtest")
+ self.assertEquals(count, 3)
diff --git a/tests/test_sync.py b/tests/test_sync.py
index 9454bdc5..373b3132 100644
--- a/tests/test_sync.py
+++ b/tests/test_sync.py
@@ -1,24 +1,41 @@
+from nose.twistedtools import deferred
+
from .dispersytestclass import DispersyTestFunc
+from twisted.internet.defer import inlineCallbacks, returnValue
class TestSync(DispersyTestFunc):
+ def setUp(self):
+ super(TestSync, self).setUp()
+ self.nodes = []
+
+ @inlineCallbacks
def _create_nodes_messages(self, messagetype="create_full_sync_text"):
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+
+ self.nodes = [node, other]
+ self.patch_send_packet_for_nodes()
+
+ yield other.send_identity(node)
# other creates messages
- messages = [getattr(other, messagetype)("Message %d" % i, i + 10) for i in xrange(30)]
- other.store(messages)
+ messages = []
+ for i in xrange(30):
+ text_message = yield getattr(other, messagetype)("Message %d" % i, i + 10)
+ messages.append(text_message)
+ yield other.store(messages)
- return node, other, messages
+ returnValue((node, other, messages))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_modulo(self):
"""
OTHER creates several messages, NODE asks for specific modulo to sync and only those modulo
may be sent back.
"""
- node, other, messages = self._create_nodes_messages()
+ node, other, messages = yield self._create_nodes_messages()
for modulo in xrange(0, 10):
for offset in xrange(0, modulo):
@@ -26,16 +43,18 @@ def test_modulo(self):
global_times = [message.distribution.global_time for message in messages if (message.distribution.global_time + offset) % modulo == 0]
sync = (1, 0, modulo, offset, [])
- other.give_message(node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", sync, 42), node)
+ created_introduction_request = yield node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", sync, 42)
+ yield other.give_message(created_introduction_request, node)
- responses = node.receive_messages(names=[u"full-sync-text"], return_after=len(global_times))
+ responses = yield node.receive_messages(names=[u"full-sync-text"], return_after=len(global_times))
response_times = [message.distribution.global_time for _, message in responses]
self.assertEqual(sorted(global_times), sorted(response_times))
-
+ @deferred(timeout=15)
+ @inlineCallbacks
def test_range(self):
- node, other, messages = self._create_nodes_messages()
+ node, other, messages = yield self._create_nodes_messages()
for time_low in xrange(1, 11):
for time_high in xrange(20, 30):
@@ -43,71 +62,107 @@ def test_range(self):
global_times = [message.distribution.global_time for message in messages if time_low <= message.distribution.global_time <= time_high]
sync = (time_low, time_high, 1, 0, [])
- other.give_message(node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", sync, 42), node)
+ created_introduction_request = yield node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", sync, 42)
+ yield other.give_message(created_introduction_request, node)
- responses = node.receive_messages(names=[u"full-sync-text"], return_after=len(global_times))
+ responses = yield node.receive_messages(names=[u"full-sync-text"], return_after=len(global_times))
response_times = [message.distribution.global_time for _, message in responses]
self.assertEqual(sorted(global_times), sorted(response_times))
-
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_in_order(self):
- node, other, messages = self._create_nodes_messages('create_in_order_text')
+ node, other, messages = yield self._create_nodes_messages('create_in_order_text')
+
+ self.nodes = [node, other]
+ self.patch_send_packet_for_nodes()
+
global_times = [message.distribution.global_time for message in messages]
# send an empty sync message to obtain all messages ASC order
- other.give_message(node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", (min(global_times), 0, 1, 0, []), 42), node)
+ created_introduction_request = yield node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", (min(global_times), 0, 1, 0, []), 42)
+ yield other.give_message(created_introduction_request, node)
- responses = node.receive_messages(names=[u"ASC-text"], return_after=len(global_times))
+ responses = yield node.receive_messages(names=[u"ASC-text"], return_after=len(global_times))
response_times = [message.distribution.global_time for _, message in responses]
self.assertEqual(sorted(global_times), sorted(response_times))
-
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_out_order(self):
- node, other, messages = self._create_nodes_messages('create_out_order_text')
+ node, other, messages = yield self._create_nodes_messages('create_out_order_text')
+
+ self.nodes = [node, other]
+ self.patch_send_packet_for_nodes()
+
global_times = [message.distribution.global_time for message in messages]
# send an empty sync message to obtain all messages DESC order
- other.give_message(node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", (min(global_times), 0, 1, 0, []), 42), node)
+ created_introduction_request = yield node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", (min(global_times), 0, 1, 0, []), 42)
+ yield other.give_message(created_introduction_request, node)
- responses = node.receive_messages(names=[u"DESC-text"], return_after=len(global_times))
+ responses = yield node.receive_messages(names=[u"DESC-text"], return_after=len(global_times))
response_times = [message.distribution.global_time for _, message in responses]
self.assertEqual(sorted(global_times), sorted(response_times))
-
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_random_order(self):
- node, other, messages = self._create_nodes_messages('create_random_order_text')
+ node, other, messages = yield self._create_nodes_messages('create_random_order_text')
+
+ self.nodes = [node, other]
+ self.patch_send_packet_for_nodes()
+
global_times = [message.distribution.global_time for message in messages]
# send an empty sync message to obtain all messages in RANDOM order
- other.give_message(node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", (min(global_times), 0, 1, 0, []), 42), node)
+ created_introduction_request = yield node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", (min(global_times), 0, 1, 0, []), 42)
+ yield other.give_message(created_introduction_request, node)
- responses = node.receive_messages(names=[u"RANDOM-text"], return_after=len(global_times))
+ responses = yield node.receive_messages(names=[u"RANDOM-text"], return_after=len(global_times))
response_times = [message.distribution.global_time for _, message in responses]
self.assertNotEqual(response_times, sorted(global_times))
self.assertNotEqual(response_times, sorted(global_times, reverse=True))
-
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_mixed_order(self):
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+
+ self.nodes = [node, other]
+ self.patch_send_packet_for_nodes()
+
+ yield other.send_identity(node)
# OTHER creates messages
- in_order_messages = [other.create_in_order_text("Message %d" % i, i + 10) for i in xrange(0, 30, 3)]
- out_order_messages = [other.create_out_order_text("Message %d" % i, i + 10) for i in xrange(1, 30, 3)]
- random_order_messages = [other.create_random_order_text("Message %d" % i, i + 10) for i in xrange(2, 30, 3)]
+ in_order_messages = []
+ for i in xrange(0, 30, 3):
+ created_in_order_text = yield other.create_in_order_text("Message %d" % i, i + 10)
+ in_order_messages.append(created_in_order_text)
- other.store(in_order_messages)
- other.store(out_order_messages)
- other.store(random_order_messages)
+ out_order_messages = []
+ for i in xrange(1, 30, 3):
+ created_out_order_text = yield other.create_out_order_text("Message %d" % i, i + 10)
+ out_order_messages.append(created_out_order_text)
+
+ random_order_messages = []
+ for i in xrange(2, 30, 3):
+ created_random_order_text = yield other.create_random_order_text("Message %d" % i, i + 10)
+ random_order_messages.append(created_random_order_text)
+
+ yield other.store(in_order_messages)
+ yield other.store(out_order_messages)
+ yield other.store(random_order_messages)
# send an empty sync message to obtain all messages ALL messages
- other.give_message(node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", (1, 0, 1, 0, []), 42), node)
+ created_introduction_request = yield node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", (1, 0, 1, 0, []), 42)
+ yield other.give_message(created_introduction_request, node)
- received = node.receive_messages(names=[u"ASC-text", u"DESC-text", u"RANDOM-text"], return_after=30)
+ received = yield node.receive_messages(names=[u"ASC-text", u"DESC-text", u"RANDOM-text"], return_after=30)
# all ASC-text must be received in-order of their global time (low to high)
received_in_order = [message.distribution.global_time for _, message in received if message.name == u"ASC-text"]
@@ -122,23 +177,41 @@ def test_mixed_order(self):
self.assertNotEqual(received_random_order, sorted([message.distribution.global_time for message in random_order_messages]))
self.assertNotEqual(received_random_order, sorted([message.distribution.global_time for message in random_order_messages], reverse=True))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_priority_order(self):
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+
+ self.nodes = [node, other]
+ self.patch_send_packet_for_nodes()
+
+ yield other.send_identity(node)
# OTHER creates messages
- high_priority_messages = [other.create_high_priority_text("Message %d" % i, i + 10) for i in xrange(0, 30, 3)]
- low_priority_messages = [other.create_low_priority_text("Message %d" % i, i + 10) for i in xrange(1, 30, 3)]
- medium_priority_messages = [other.create_medium_priority_text("Message %d" % i, i + 10) for i in xrange(2, 30, 3)]
+ high_priority_messages = []
+ for i in xrange(0, 30, 3):
+ created_high_priority_text = yield other.create_high_priority_text("Message %d" % i, i + 10)
+ high_priority_messages.append(created_high_priority_text)
+
+ low_priority_messages = []
+ for i in xrange(1, 30, 3):
+ created_low_priority_text = yield other.create_low_priority_text("Message %d" % i, i + 10)
+ low_priority_messages.append(created_low_priority_text)
- other.store(high_priority_messages)
- other.store(low_priority_messages)
- other.store(medium_priority_messages)
+ medium_priority_messages = []
+ for i in xrange(2, 30, 3):
+ created_medium_priority_text = yield other.create_medium_priority_text("Message %d" % i, i + 10)
+ medium_priority_messages.append(created_medium_priority_text)
+
+ yield other.store(high_priority_messages)
+ yield other.store(low_priority_messages)
+ yield other.store(medium_priority_messages)
# send an empty sync message to obtain all messages ALL messages
- other.give_message(node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", (1, 0, 1, 0, []), 42), node)
+ created_introduction_request = yield node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u"unknown", (1, 0, 1, 0, []), 42)
+ yield other.give_message(created_introduction_request, node)
- received = node.receive_messages(names=[u"high-priority-text", u"low-priority-text", u"medium-priority-text"], return_after=30)
+ received = yield node.receive_messages(names=[u"high-priority-text", u"low-priority-text", u"medium-priority-text"], return_after=30)
# the first should be the high-priority-text
offset = 0
@@ -155,71 +228,87 @@ def test_priority_order(self):
self.assertEqual([message.name for _, message in received[offset:offset + len(low_priority_messages)]],
["low-priority-text"] * len(low_priority_messages))
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_last_1(self):
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+
+ self.nodes = [node, other]
+ self.patch_send_packet_for_nodes()
+
+ yield other.send_identity(node)
# send a message
- message = other.create_last_1_test("should be accepted (1)", 10)
- node.give_message(message, other)
- node.assert_is_stored(message)
+ message = yield other.create_last_1_test("should be accepted (1)", 10)
+ yield node.give_message(message, other)
+ yield node.assert_is_stored(message)
# send a message, should replace current one
- new_message = other.create_last_1_test("should be accepted (2)", 11)
- node.give_message(new_message, other)
- node.assert_not_stored(message)
- node.assert_is_stored(new_message)
+ new_message = yield other.create_last_1_test("should be accepted (2)", 11)
+ yield node.give_message(new_message, other)
+ yield node.assert_not_stored(message)
+ yield node.assert_is_stored(new_message)
# send a message (older: should be dropped)
- old_message = other.create_last_1_test("should be dropped (1)", 9)
- node.give_message(old_message, other)
+ old_message = yield other.create_last_1_test("should be dropped (1)", 9)
+ yield node.give_message(old_message, other)
- node.assert_not_stored(message)
- node.assert_is_stored(new_message)
- node.assert_not_stored(old_message)
+ yield node.assert_not_stored(message)
+ yield node.assert_is_stored(new_message)
+ yield node.assert_not_stored(old_message)
# as proof for the drop, the newest message should be sent back
- _, message = other.receive_message(names=[u"last-1-test"]).next()
+ received_message_iterator = yield other.receive_message(names=[u"last-1-test"])
+ _, message = received_message_iterator.next()
self.assertEqual(message.distribution.global_time, new_message.distribution.global_time)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_last_9(self):
- message = self._community.get_meta_message(u"last-9-test")
+ self._community.get_meta_message(u"last-9-test")
+
+ node, other = yield self.create_nodes(2)
+
+ self.nodes = [node, other]
+ self.patch_send_packet_for_nodes()
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ yield other.send_identity(node)
all_messages = [21, 20, 28, 27, 22, 23, 24, 26, 25]
messages_so_far = []
for global_time in all_messages:
# send a message
- message = other.create_last_9_test(str(global_time), global_time)
- node.give_message(message, other)
+ message = yield other.create_last_9_test(str(global_time), global_time)
+ yield node.give_message(message, other)
messages_so_far.append((global_time, message))
for _, message in messages_so_far:
- node.assert_is_stored(message)
+ yield node.assert_is_stored(message)
for global_time in [11, 12, 13, 19, 18, 17]:
# send a message (older: should be dropped)
- node.give_message(other.create_last_9_test(str(global_time), global_time), other)
+ created_last_9_test_text = yield other.create_last_9_test(str(global_time), global_time)
+ yield node.give_message(created_last_9_test_text, other)
for _, message in messages_so_far:
- node.assert_is_stored(message)
+ yield node.assert_is_stored(message)
messages_so_far.sort()
for global_time in [30, 35, 37, 31, 32, 34, 33, 36, 38, 45, 44, 43, 42, 41, 40, 39]:
# send a message (should be added and old one removed)
- message = other.create_last_9_test(str(global_time), global_time)
- node.give_message(message, other)
+ message = yield other.create_last_9_test(str(global_time), global_time)
+ yield node.give_message(message, other)
messages_so_far.pop(0)
messages_so_far.append((global_time, message))
messages_so_far.sort()
for _, message in messages_so_far:
- node.assert_is_stored(message)
+ yield node.assert_is_stored(message)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_last_1_doublemember(self):
"""
Normally the LastSyncDistribution policy stores the last N messages for each member that
@@ -239,40 +328,47 @@ def test_last_1_doublemember(self):
these options.
"""
message = self._community.get_meta_message(u"last-1-doublemember-text")
- nodeA, nodeB, nodeC = self.create_nodes(3)
- nodeA.send_identity(nodeB)
- nodeA.send_identity(nodeC)
- nodeB.send_identity(nodeC)
+ nodeA, nodeB, nodeC = yield self.create_nodes(3)
+ yield nodeA.send_identity(nodeB)
+ yield nodeA.send_identity(nodeC)
+ yield nodeB.send_identity(nodeC)
+ @inlineCallbacks
def create_double_signed_message(origin, destination, message, global_time):
origin_mid_pre = origin._community.my_member.mid
destination_mid_pre = destination._community.my_member.mid
assert origin_mid_pre != destination_mid_pre
- submsg = origin.create_last_1_doublemember_text(destination.my_member, message, global_time)
+ submsg = yield origin.create_last_1_doublemember_text(destination.my_member, message, global_time)
assert origin_mid_pre == origin._community.my_member.mid
assert destination_mid_pre == destination._community.my_member.mid
- destination.give_message(origin.create_signature_request(12345, submsg, global_time), origin)
- _, message = origin.receive_message(names=[u"dispersy-signature-response"]).next()
- return (global_time, message.payload.message)
+ created_signature_request = yield origin.create_signature_request(12345, submsg, global_time)
+ yield destination.give_message(created_signature_request, origin)
+ received_message_iterator = yield origin.receive_message(names=[u"dispersy-signature-response"])
+ _, message = received_message_iterator.next()
+ returnValue((global_time, message.payload.message))
+ @inlineCallbacks
def check_database_contents():
# TODO(emilon): This could be done better.
+ @inlineCallbacks
def fetch_rows():
- return list(nodeA._dispersy.database.execute(
+ rows = yield nodeA._dispersy.database.stormdb.fetchall(
u"SELECT sync.global_time, sync.member, double_signed_sync.member1, double_signed_sync.member2, "
u"member1.mid as mid1, member2.mid as mid2 FROM sync "
u"JOIN double_signed_sync ON double_signed_sync.sync = sync.id "
u"JOIN member member1 ON double_signed_sync.member1 = member1.id "
u"JOIN member member2 ON double_signed_sync.member2 = member2.id "
u"WHERE sync.community = ? AND sync.member = ? AND sync.meta_message = ?",
- (nodeA._community.database_id, nodeA.my_member.database_id, message.database_id)))
+ (nodeA._community.database_id, nodeA.my_member.database_id, message.database_id))
+ returnValue(rows)
entries = []
database_ids = {}
- for global_time, member, sync_member1, sync_member2, mid1, mid2 in nodeA.call(fetch_rows):
+ rows = yield nodeA.call(fetch_rows)
+ for global_time, member, sync_member1, sync_member2, mid1, mid2 in rows:
entries.append((global_time, member, sync_member1, sync_member2))
database_ids[str(mid1)] = sync_member1
database_ids[str(mid2)] = sync_member2
@@ -300,25 +396,33 @@ def fetch_rows():
global_time = 10
other_global_time = global_time + 1
messages = []
- messages.append(create_double_signed_message(nodeA, nodeB, "Allow=True (1AB)", global_time))
- messages.append(create_double_signed_message(nodeA, nodeC, "Allow=True (1AC)", other_global_time))
+ double_signed_sync_res = yield create_double_signed_message(nodeA, nodeB, "Allow=True (1AB)", global_time)
+ messages.append(double_signed_sync_res)
+ double_signed_sync_res = yield create_double_signed_message(nodeA, nodeC, "Allow=True (1AC)", other_global_time)
+ messages.append(double_signed_sync_res)
# Send a newer set, the previous ones should be dropped
# Those two are the messages that we should have in the DB at the end of the test.
global_time = 20
other_global_time = global_time + 1
- messages.append(create_double_signed_message(nodeA, nodeB, "Allow=True (2AB) @%d" % global_time, global_time))
- messages.append(create_double_signed_message(nodeA, nodeC, "Allow=True (2AC) @%d" % other_global_time, other_global_time))
+ double_signed_sync_res = yield create_double_signed_message(nodeA, nodeB, "Allow=True (2AB) @%d" % global_time, global_time)
+ messages.append(double_signed_sync_res)
+ double_signed_sync_res = yield create_double_signed_message(nodeA, nodeC, "Allow=True (2AC) @%d" % other_global_time, other_global_time)
+ messages.append(double_signed_sync_res)
# Send another message (same global time: should be droped)
- messages.append(create_double_signed_message(nodeA, nodeB, "Allow=True Duplicate global time (2ABbis) @%d" % global_time, global_time))
- messages.append(create_double_signed_message(nodeA, nodeC, "Allow=True Duplicate global time (2ACbis) @%d" % other_global_time, other_global_time))
+ double_signed_sync_res = yield create_double_signed_message(nodeA, nodeB, "Allow=True Duplicate global time (2ABbis) @%d" % global_time, global_time)
+ messages.append(double_signed_sync_res)
+ double_signed_sync_res = yield create_double_signed_message(nodeA, nodeC, "Allow=True Duplicate global time (2ACbis) @%d" % other_global_time, other_global_time)
+ messages.append(double_signed_sync_res)
# send yet another message (older: should be dropped too)
old_global_time = 8
other_old_global_time = old_global_time + 1
- messages.append(create_double_signed_message(nodeA, nodeB, "Allow=True Should be dropped (1AB)", old_global_time))
- messages.append(create_double_signed_message(nodeA, nodeC, "Allow=True Should be dropped (1AC)", other_old_global_time))
+ double_signed_sync_res = yield create_double_signed_message(nodeA, nodeB, "Allow=True Should be dropped (1AB)", old_global_time)
+ messages.append(double_signed_sync_res)
+ double_signed_sync_res = yield create_double_signed_message(nodeA, nodeC, "Allow=True Should be dropped (1AC)", other_old_global_time)
+ messages.append(double_signed_sync_res)
current_global_timeB = 0
current_global_timeC = 0
@@ -329,19 +433,20 @@ def fetch_rows():
current_global_timeB = max(global_timeB, current_global_timeB)
current_global_timeC = max(global_timeC, current_global_timeC)
- nodeA.give_messages([messageB, messageC], nodeB)
- check_database_contents()
+ yield nodeA.give_messages([messageB, messageC], nodeB)
+ yield check_database_contents()
# as proof for the drop, the more recent messages should be sent back to nodeB
times = []
- for _, message in nodeB.receive_message(names=[u"last-1-doublemember-text"]):
+ received_messages = yield nodeB.receive_message(names=[u"last-1-doublemember-text"])
+ for _, message in received_messages:
times.append(message.distribution.global_time)
self.assertEqual(sorted(times), [global_time, other_global_time])
# send a message (older + different member combination: should be dropped)
old_global_time = 9
- create_double_signed_message(nodeB, nodeA, "Allow=True (2BA)", old_global_time)
- create_double_signed_message(nodeC, nodeA, "Allow=True (2CA)", old_global_time)
+ yield create_double_signed_message(nodeB, nodeA, "Allow=True (2BA)", old_global_time)
+ yield create_double_signed_message(nodeC, nodeA, "Allow=True (2CA)", old_global_time)
- check_database_contents()
+ yield check_database_contents()
diff --git a/tests/test_timeline.py b/tests/test_timeline.py
index 06633f99..65a7ad43 100644
--- a/tests/test_timeline.py
+++ b/tests/test_timeline.py
@@ -1,29 +1,35 @@
+from twisted.internet.task import deferLater
+
+from nose.twistedtools import deferred, reactor
+from twisted.internet.defer import inlineCallbacks
+
from .dispersytestclass import DispersyTestFunc
class TestTimeline(DispersyTestFunc):
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_delay_by_proof(self):
"""
When OTHER receives a message that it has no permission for, it will send a
dispersy-missing-proof message to try to obtain the dispersy-authorize.
"""
- node, other = self.create_nodes(2)
- node.send_identity(other)
+ node, other = yield self.create_nodes(2)
+ yield node.send_identity(other)
# permit NODE
- proof_msg = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"permit"),
+ proof_msg = yield self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"permit"),
(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"authorize")])
# NODE creates message
- tmessage = node.create_protected_full_sync_text("Protected message", 42)
- other.give_message(tmessage, node)
-
+ tmessage = yield node.create_protected_full_sync_text("Protected message", 42)
+ yield other.give_message(tmessage, node)
# must NOT have been stored in the database
- other.assert_not_stored(tmessage)
+ yield other.assert_not_stored(tmessage)
# OTHER sends dispersy-missing-proof to NODE
- responses = node.receive_messages()
+ responses = yield node.receive_messages()
self.assertEqual(len(responses), 1)
for _, message in responses:
self.assertEqual(message.name, u"dispersy-missing-proof")
@@ -31,36 +37,42 @@ def test_delay_by_proof(self):
self.assertEqual(message.payload.global_time, 42)
# NODE provides proof
- other.give_message(proof_msg, node)
+ yield other.give_message(proof_msg, node)
# must have been stored in the database
- other.assert_is_stored(tmessage)
+ yield other.assert_is_stored(tmessage)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_missing_proof(self):
"""
When OTHER receives a dispersy-missing-proof message it needs to find and send the proof.
"""
- node, other = self.create_nodes(2)
- node.send_identity(other)
+ node, other = yield self.create_nodes(2)
+ yield node.send_identity(other)
# permit NODE
- authorize = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"permit"),
+ authorize = yield self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"permit"),
(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"authorize")])
- node.give_message(authorize, self._mm)
+ yield node.give_message(authorize, self._mm)
- protected_text = node.create_protected_full_sync_text("Protected message", 42)
- node.store([protected_text])
+ protected_text = yield node.create_protected_full_sync_text("Protected message", 42)
+ yield node.store([protected_text])
# OTHER pretends to received the protected message and requests the proof
- node.give_message(other.create_missing_proof(node.my_member, 42), other)
+ created_missing_proof = yield other.create_missing_proof(node.my_member, 42)
+ yield node.give_message(created_missing_proof, other)
# NODE sends dispersy-authorize to OTHER
- _, authorize = other.receive_message(names=[u"dispersy-authorize"]).next()
+ received_message = yield other.receive_message(names=[u"dispersy-authorize"])
+ _, authorize = received_message.next()
permission_triplet = (node.my_member.mid, u"protected-full-sync-text", u"permit")
authorize_permission_triplets = [(triplet[0].mid, triplet[1].name, triplet[2]) for triplet in authorize.payload.permission_triplets]
self.assertIn(permission_triplet, authorize_permission_triplets)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_missing_authorize_proof(self):
"""
MASTER
@@ -74,19 +86,21 @@ def test_missing_authorize_proof(self):
When NODE receives a dispersy-missing-proof message from OTHER for authorize(MM, NODE)
the dispersy-authorize message for authorize(MASTER, MM) must be returned.
"""
- node, other = self.create_nodes(2)
- node.send_identity(other)
+ node, other = yield self.create_nodes(2)
+ yield node.send_identity(other)
# permit NODE
- authorize = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"permit"),
+ authorize = yield self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"permit"),
(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"authorize")])
- node.give_message(authorize, self._mm)
+ yield node.give_message(authorize, self._mm)
# OTHER wants the proof that OWNER is allowed to grant authorization to NODE
- node.give_message(other.create_missing_proof(authorize.authentication.member, authorize.distribution.global_time), other)
+ created_missing_proof = yield other.create_missing_proof(authorize.authentication.member, authorize.distribution.global_time)
+ yield node.give_message(created_missing_proof, other)
# NODE sends dispersy-authorize containing authorize(MASTER, OWNER) to OTHER
- _, authorize = other.receive_message(names=[u"dispersy-authorize"]).next()
+ received_message = yield other.receive_message(names=[u"dispersy-authorize"])
+ _, authorize = received_message.next()
permission_triplet = (self._mm.my_member.mid, u"protected-full-sync-text", u"permit")
authorize_permission_triplets = [(triplet[0].mid, triplet[1].name, triplet[2]) for triplet in authorize.payload.permission_triplets]
diff --git a/tests/test_undo.py b/tests/test_undo.py
index 9e96851a..712aafeb 100644
--- a/tests/test_undo.py
+++ b/tests/test_undo.py
@@ -1,8 +1,17 @@
+from nose.twistedtools import deferred
+from twisted.internet.defer import inlineCallbacks, returnValue
+
from .dispersytestclass import DispersyTestFunc
class TestUndo(DispersyTestFunc):
+ def setUp(self):
+ super(TestUndo, self).setUp()
+ self.nodes = []
+
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_self_undo_own(self):
"""
NODE generates a few messages and then undoes them.
@@ -10,70 +19,95 @@ def test_self_undo_own(self):
This is always allowed. In fact, no check is made since only externally received packets
will be checked.
"""
- node, = self.create_nodes(1)
+ node, = yield self.create_nodes(1)
# create messages
- messages = [node.create_full_sync_text("Should undo #%d" % i, i + 10) for i in xrange(10)]
- node.give_messages(messages, node)
+ messages = []
+ for i in xrange(10):
+ created_full_sync_text = yield node.create_full_sync_text("Should undo #%d" % i, i + 10)
+ messages.append(created_full_sync_text)
+
+ yield node.give_messages(messages, node)
# check that they are in the database and are NOT undone
- node.assert_is_stored(messages=messages)
+ yield node.assert_is_stored(messages=messages)
# undo all messages
- undoes = [node.create_undo_own(message, i + 100, i + 1) for i, message in enumerate(messages)]
+ undoes = []
+ for i, message in enumerate(messages):
+ create_undo_own_message = yield node.create_undo_own(message, i + 100, i + 1)
+ undoes.append(create_undo_own_message)
- node.give_messages(undoes, node)
+ yield node.give_messages(undoes, node)
# check that they are in the database and ARE undone
- node.assert_is_undone(messages=messages)
- node.assert_is_stored(messages=undoes)
+ yield node.assert_is_undone(messages=messages)
+ yield node.assert_is_stored(messages=undoes)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_node_undo_other(self):
"""
MM gives NODE permission to undo, OTHER generates a few messages and then NODE undoes
them.
"""
- node, other = self.create_nodes(2)
- other.send_identity(node)
+ node, other = yield self.create_nodes(2)
+ yield other.send_identity(node)
# MM grants undo permission to NODE
- authorize = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"full-sync-text"), u"undo")], self._mm.claim_global_time())
- node.give_message(authorize, self._mm)
- other.give_message(authorize, self._mm)
+ mm_claimed_global_time = yield self._mm.claim_global_time()
+ authorize = yield self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"full-sync-text"), u"undo")], mm_claimed_global_time)
+ yield node.give_message(authorize, self._mm)
+ yield other.give_message(authorize, self._mm)
# OTHER creates messages
- messages = [other.create_full_sync_text("Should undo #%d" % i, i + 10) for i in xrange(10)]
- node.give_messages(messages, other)
+ messages = []
+ for i in xrange(10):
+ created_full_sync_text = yield other.create_full_sync_text("Should undo #%d" % i, i + 10)
+ messages.append(created_full_sync_text)
+
+ yield node.give_messages(messages, other)
# check that they are in the database and are NOT undone
- node.assert_is_stored(messages=messages)
+ yield node.assert_is_stored(messages=messages)
# NODE undoes all messages
- undoes = [node.create_undo_other(message, message.distribution.global_time + 100, 1 + i) for i, message in enumerate(messages)]
- node.give_messages(undoes, node)
+ undoes = []
+ for i, message in enumerate(messages):
+ create_undo_other_message = yield node.create_undo_other(message, message.distribution.global_time + 100, 1 + i)
+ undoes.append(create_undo_other_message)
+
+ yield node.give_messages(undoes, node)
# check that they are in the database and ARE undone
- node.assert_is_undone(messages=messages)
- node.assert_is_stored(messages=undoes)
+ yield node.assert_is_undone(messages=messages)
+ yield node.assert_is_stored(messages=undoes)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_self_attempt_undo_twice(self):
"""
NODE generated a message and then undoes it twice. The dispersy core should ensure that
that the second undo is refused and the first undo message should be returned instead.
"""
- node, = self.create_nodes(1)
+ node, = yield self.create_nodes(1)
# create message
- message = node.create_full_sync_text("Should undo @%d" % 1, 1)
- node.give_message(message, node)
+ message = yield node.create_full_sync_text("Should undo @%d" % 1, 1)
+ yield node.give_message(message, node)
# undo twice
+ @inlineCallbacks
def create_undoes():
- return node._community.create_undo(message), node._community.create_undo(message)
- undo1, undo2 = node.call(create_undoes)
+ u1 = yield node._community.create_undo(message)
+ u2 = yield node._community.create_undo(message)
+ returnValue((u1, u2))
+ undo1, undo2 = yield node.call(create_undoes)
self.assertEqual(undo1.packet, undo2.packet)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_node_resolve_undo_twice(self):
"""
Make sure that in the event of receiving two undo messages from the same member, both will be stored,
@@ -83,29 +117,31 @@ def test_node_resolve_undo_twice(self):
Both messages should be kept and the lowest one should be undone.
"""
- node, other = self.create_nodes(2)
- node.send_identity(other)
+ node, other = yield self.create_nodes(2)
+ yield node.send_identity(other)
# MM grants undo permission to NODE
- authorize = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"full-sync-text"), u"undo")], self._mm.claim_global_time())
- node.give_message(authorize, self._mm)
- other.give_message(authorize, self._mm)
+ mm_claimed_global_time = yield self._mm.claim_global_time()
+ authorize = yield self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"full-sync-text"), u"undo")], mm_claimed_global_time)
+ yield node.give_message(authorize, self._mm)
+ yield other.give_message(authorize, self._mm)
# create message
- message = node.create_full_sync_text("Should undo @%d" % 10, 10)
+ message = yield node.create_full_sync_text("Should undo @%d" % 10, 10)
# create undoes
- undo1 = node.create_undo_own(message, 11, 1)
- undo2 = node.create_undo_own(message, 12, 2)
+ undo1 = yield node.create_undo_own(message, 11, 1)
+ undo2 = yield node.create_undo_own(message, 12, 2)
low_message, high_message = sorted([undo1, undo2], key=lambda message: message.packet)
- other.give_message(message, node)
- other.give_message(low_message, node)
- other.give_message(high_message, node)
+ yield other.give_message(message, node)
+ yield other.give_message(low_message, node)
+ yield other.give_message(high_message, node)
# OTHER should send the first message back when receiving
# the second one (its "higher" than the one just received)
undo_packets = []
- for candidate, b in node.receive_packets():
+ received_packets = yield node.receive_packets()
+ for candidate, b in received_packets:
self._logger.debug(candidate)
self._logger.debug(type(b))
self._logger.debug("%d", len(b))
@@ -118,23 +154,27 @@ def test_node_resolve_undo_twice(self):
for x in undo_packets:
self._logger.debug("loop%d", len(x))
+ @inlineCallbacks
def fetch_all_messages():
- for row in list(other._dispersy.database.execute(u"SELECT * FROM sync")):
+ other_rows = yield other._dispersy.database.stormdb.fetchall(u"SELECT * FROM sync")
+ for row in other_rows:
self._logger.debug("_______ %s", row)
- other.call(fetch_all_messages)
+ yield other.call(fetch_all_messages)
self._logger.debug("%d", len(low_message.packet))
self.assertEqual(undo_packets, [low_message.packet])
# NODE should have both messages on the database and the lowest one should be undone by the highest.
- messages = other.fetch_messages((u"dispersy-undo-own",))
+ messages = yield other.fetch_messages((u"dispersy-undo-own",))
self.assertEquals(len(messages), 2)
- other.assert_is_done(low_message)
- other.assert_is_undone(high_message)
- other.assert_is_undone(high_message, undone_by=low_message)
- other.assert_is_undone(message, undone_by=low_message)
+ yield other.assert_is_stored(low_message)
+ yield other.assert_is_undone(high_message)
+ yield other.assert_is_undone(high_message, undone_by=low_message)
+ yield other.assert_is_undone(message, undone_by=low_message)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_missing_message(self):
"""
NODE generates a few messages without sending them to OTHER. Following, NODE undoes the
@@ -142,94 +182,111 @@ def test_missing_message(self):
to request the messages that are about to be undone. The messages need to be processed and
subsequently undone.
"""
- node, other = self.create_nodes(2)
- node.send_identity(other)
+ node, other = yield self.create_nodes(2)
+ self.nodes.append(node)
+ self.nodes.append(other)
+ self.patch_send_packet_for_nodes()
+ yield node.send_identity(other)
# create messages
- messages = [node.create_full_sync_text("Should undo @%d" % i, i + 10) for i in xrange(10)]
+ messages = []
+ for i in xrange(10):
+ created_full_sync_text = yield node.create_full_sync_text("Should undo @%d" % i, i + 10)
+ messages.append(created_full_sync_text)
# undo all messages
- undoes = [node.create_undo_own(message, message.distribution.global_time + 100, i + 1) for i, message in enumerate(messages)]
+ undoes = []
+ for i, message in enumerate(messages):
+ create_undo_own_message = yield node.create_undo_own(message, message.distribution.global_time + 100, i + 1)
+ undoes.append(create_undo_own_message)
# send undoes to OTHER
- other.give_messages(undoes, node)
+ yield other.give_messages(undoes, node)
# receive the dispersy-missing-message messages
global_times = [message.distribution.global_time for message in messages]
global_time_requests = []
- for _, message in node.receive_messages(names=[u"dispersy-missing-message"]):
+ received_messages = yield node.receive_messages(names=[u"dispersy-missing-message"])
+ for _, message in received_messages:
self.assertEqual(message.payload.member.public_key, node.my_member.public_key)
global_time_requests.extend(message.payload.global_times)
self.assertEqual(sorted(global_times), sorted(global_time_requests))
# give all 'delayed' messages
- other.give_messages(messages, node)
+ yield other.give_messages(messages, node)
# check that they are in the database and ARE undone
- other.assert_is_undone(messages=messages)
- other.assert_is_stored(messages=undoes)
+ yield other.assert_is_undone(messages=messages)
+ yield other.assert_is_stored(messages=undoes)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_revoke_causing_undo(self):
"""
SELF gives NODE permission to undo, OTHER created a message, NODE undoes the message, SELF
revokes the undo permission AFTER the message was undone -> the message is re-done.
"""
- node, other = self.create_nodes(2)
- node.send_identity(other)
+ node, other = yield self.create_nodes(2)
+ yield node.send_identity(other)
# MM grants undo permission to NODE
- authorize = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"full-sync-text"), u"undo")], self._mm.claim_global_time())
- node.give_message(authorize, self._mm)
- other.give_message(authorize, self._mm)
+ mm_claimed_global_time = yield self._mm.claim_global_time()
+ authorize = yield self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"full-sync-text"), u"undo")], mm_claimed_global_time)
+ yield node.give_message(authorize, self._mm)
+ yield other.give_message(authorize, self._mm)
# OTHER creates a message
- message = other.create_full_sync_text("will be undone", 42)
- other.give_message(message, other)
- other.assert_is_stored(message)
+ message = yield other.create_full_sync_text("will be undone", 42)
+ yield other.give_message(message, other)
+ yield other.assert_is_stored(message)
# NODE undoes the message
- undo = node.create_undo_other(message, message.distribution.global_time + 1, 1)
- other.give_message(undo, node)
- other.assert_is_undone(message)
- other.assert_is_stored(undo)
+ undo = yield node.create_undo_other(message, message.distribution.global_time + 1, 1)
+ yield other.give_message(undo, node)
+ yield other.assert_is_undone(message)
+ yield other.assert_is_stored(undo)
# SELF revoke undo permission from NODE, as the globaltime of the mm is lower than 42 the message needs to be done
- revoke = self._mm.create_revoke([(node.my_member, self._community.get_meta_message(u"full-sync-text"), u"undo")])
- other.give_message(revoke, self._mm)
- other.assert_is_done(message)
+ revoke = yield self._mm.create_revoke([(node.my_member, self._community.get_meta_message(u"full-sync-text"), u"undo")])
+ yield other.give_message(revoke, self._mm)
+ yield other.assert_is_stored(message)
+ @deferred(timeout=10)
+ @inlineCallbacks
def test_revoke_causing_undo_permitted(self):
"""
SELF gives NODE permission to undo, OTHER created a message, NODE undoes the message, SELF
revokes the undo permission AFTER the message was undone -> the message is re-done.
"""
- node, other = self.create_nodes(2)
- node.send_identity(other)
+ node, other = yield self.create_nodes(2)
+ yield node.send_identity(other)
# MM grants permit permission to OTHER
- authorize = self._mm.create_authorize([(other.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"permit")], self._mm.claim_global_time())
- node.give_message(authorize, self._mm)
- other.give_message(authorize, self._mm)
+ mm_claimed_global_time = yield self._mm.claim_global_time()
+ authorize = yield self._mm.create_authorize([(other.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"permit")], mm_claimed_global_time)
+ yield node.give_message(authorize, self._mm)
+ yield other.give_message(authorize, self._mm)
# MM grants undo permission to NODE
- authorize = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"undo")], self._mm.claim_global_time())
- node.give_message(authorize, self._mm)
- other.give_message(authorize, self._mm)
+ mm_claimed_global_time = yield self._mm.claim_global_time()
+ authorize = yield self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"undo")], mm_claimed_global_time)
+ yield node.give_message(authorize, self._mm)
+ yield other.give_message(authorize, self._mm)
# OTHER creates a message
- message = other.create_protected_full_sync_text("will be undone", 42)
- other.give_message(message, other)
- other.assert_is_stored(message)
+ message = yield other.create_protected_full_sync_text("will be undone", 42)
+ yield other.give_message(message, other)
+ yield other.assert_is_stored(message)
# NODE undoes the message
- undo = node.create_undo_other(message, message.distribution.global_time + 1, 1)
- other.give_message(undo, node)
- other.assert_is_undone(message)
- other.assert_is_stored(undo)
+ undo = yield node.create_undo_other(message, message.distribution.global_time + 1, 1)
+ yield other.give_message(undo, node)
+ yield other.assert_is_undone(message)
+ yield other.assert_is_stored(undo)
# SELF revoke undo permission from NODE, as the globaltime of the mm is lower than 42 the message needs to be done
- revoke = self._mm.create_revoke([(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"undo")])
- other.give_message(revoke, self._mm)
- other.assert_is_done(message)
+ revoke = yield self._mm.create_revoke([(node.my_member, self._community.get_meta_message(u"protected-full-sync-text"), u"undo")])
+ yield other.give_message(revoke, self._mm)
+ yield other.assert_is_stored(message)
diff --git a/tests/test_walker.py b/tests/test_walker.py
index 8ece19ae..6aa558db 100644
--- a/tests/test_walker.py
+++ b/tests/test_walker.py
@@ -1,30 +1,54 @@
+from nose.twistedtools import deferred
+from twisted.internet.defer import inlineCallbacks, returnValue
+
from .dispersytestclass import DispersyTestFunc
class TestWalker(DispersyTestFunc):
+ @deferred(timeout=10)
def test_one_walker(self): return self.check_walker([""])
+
+ @deferred(timeout=10)
def test_two_walker(self): return self.check_walker(["", ""])
+
+ @deferred(timeout=10)
def test_many_walker(self): return self.check_walker([""] * 22)
+
+ @deferred(timeout=10)
def test_one_t_walker(self): return self.check_walker(["t"])
+
+ @deferred(timeout=10)
def test_two_t_walker(self): return self.check_walker(["t", "t"])
+
+ @deferred(timeout=10)
def test_many_t_walker(self): return self.check_walker(["t"] * 22)
+
+ @deferred(timeout=10)
def test_two_mixed_walker_a(self): return self.check_walker(["", "t"])
+
+ @deferred(timeout=10)
def test_many_mixed_walker_a(self): return self.check_walker(["", "t"] * 11)
+
+ @deferred(timeout=10)
def test_two_mixed_walker_b(self): return self.check_walker(["t", ""])
+
+ @deferred(timeout=10)
def test_many_mixed_walker_b(self): return self.check_walker(["t", ""] * 11)
+ @inlineCallbacks
def create_others(self, all_flags):
assert isinstance(all_flags, list)
assert all(isinstance(flags, str) for flags in all_flags)
nodes = []
for flags in all_flags:
- node, = self.create_nodes(tunnel="t" in flags)
+ node, = yield self.create_nodes(tunnel="t" in flags)
nodes.append(node)
- return nodes
+ returnValue(nodes)
+ @inlineCallbacks
def check_walker(self, all_flags):
"""
All nodes will perform a introduction request to SELF in one batch.
@@ -32,10 +56,12 @@ def check_walker(self, all_flags):
assert isinstance(all_flags, list)
assert all(isinstance(flags, str) for flags in all_flags)
- nodes = self.create_others(all_flags)
+ nodes = yield self.create_others(all_flags)
# create all requests
- requests = [node.create_introduction_request(self._mm.my_candidate,
+ requests = []
+ for identifier, node in enumerate(nodes, 1):
+ created_introduction_request = yield node.create_introduction_request(self._mm.my_candidate,
node.lan_address,
node.wan_address,
True,
@@ -43,20 +69,23 @@ def check_walker(self, all_flags):
None,
identifier,
42)
- for identifier, node
- in enumerate(nodes, 1)]
+ requests.append(created_introduction_request)
# give all requests in one batch to dispersy
- self._mm.call(self._dispersy.on_incoming_packets, [(node.my_candidate, node.encode_message(request))
- for node, request
- in zip(nodes, requests)])
+ incoming_packets = yield self._dispersy.on_incoming_packets
+ encoded_messages= []
+ for node, request in zip(nodes, requests):
+ encoded_message = yield node.encode_message(request)
+ encoded_messages.append((node.my_candidate, encoded_message))
+ yield self._mm.call(incoming_packets, encoded_messages)
is_tunnelled_map = dict([(node.lan_address, node.tunnel) for node in nodes])
num_tunnelled_nodes = len([node for node in nodes if node.tunnel])
num_non_tunnelled_nodes = len([node for node in nodes if not node.tunnel])
for node in nodes:
- _, response = node.receive_message().next()
+ received_message = yield node.receive_message()
+ _, response = received_message.next()
# MM must not introduce NODE to itself
self.assertNotEquals(response.payload.lan_introduction_address, node.lan_address)
diff --git a/tool/main.py b/tool/main.py
index 3c39f0e8..df7c3dc2 100644
--- a/tool/main.py
+++ b/tool/main.py
@@ -9,6 +9,7 @@
from twisted.internet import reactor
from twisted.python.log import addObserver
+from ..util import blockingCallFromThread
from ..dispersy import Dispersy
from ..endpoint import StandaloneEndpoint
@@ -80,17 +81,19 @@ def main_real(setup=None):
# setup
dispersy = Dispersy(StandaloneEndpoint(opt.port, opt.ip), unicode(opt.statedir), unicode(opt.databasefile))
+ blockingCallFromThread(reactor, dispersy.initialize_statistics)
dispersy.statistics.enable_debug_statistics(opt.debugstatistics)
def signal_handler(sig, frame):
logger.warning("Received signal '%s' in %s (shutting down)", sig, frame)
- dispersy.stop()
+ blockingCallFromThread(reactor, dispersy.stop)
reactor.stop()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# start
- if not dispersy.start():
+ dispersy_started = yield dispersy.start()
+ if not dispersy_started:
raise RuntimeError("Unable to start Dispersy")
# This has to be scheduled _after_ starting dispersy so the DB is opened by when this is actually executed.
diff --git a/tracker/community.py b/tracker/community.py
index 53e4d31d..d81a4b3f 100644
--- a/tracker/community.py
+++ b/tracker/community.py
@@ -1,3 +1,5 @@
+from twisted.internet.defer import returnValue, inlineCallbacks
+
from ..community import Community, HardKilledCommunity
from ..conversion import BinaryConversion
from ..exception import ConversionNotFoundException
@@ -113,6 +115,7 @@ def get_conversion_for_packet(self, packet):
def take_step(self):
raise RuntimeError("a tracker should not walk")
+ @inlineCallbacks
def dispersy_cleanup_community(self, message):
# since the trackers use in-memory databases, we need to store the destroy-community
# message, and all associated proof, separately.
@@ -124,7 +127,7 @@ def dispersy_cleanup_community(self, message):
write("# received dispersy-destroy-community from %s\n" % (str(message.candidate),))
identity_id = self._meta_messages[u"dispersy-identity"].database_id
- execute = self._dispersy.database.execute
+ fetchone = self._dispersy.database.stormdb.fetchone
messages = [message]
stored = set()
while messages:
@@ -136,9 +139,9 @@ def dispersy_cleanup_community(self, message):
if not message.authentication.member.public_key in stored:
try:
- packet, = execute(u"SELECT packet FROM sync WHERE meta_message = ? AND member = ?", (
- identity_id, message.authentication.member.database_id)).next()
- except StopIteration:
+ packet, = yield fetchone(u"SELECT packet FROM sync WHERE meta_message = ? AND member = ?", (
+ identity_id, message.authentication.member.database_id))
+ except TypeError:
pass
else:
write(" ".join(("dispersy-identity", str(packet).encode("HEX"), "\n")))
@@ -146,8 +149,9 @@ def dispersy_cleanup_community(self, message):
_, proofs = self._timeline.check(message)
messages.extend(proofs)
- return TrackerHardKilledCommunity
+ returnValue(TrackerHardKilledCommunity)
+ @inlineCallbacks
def on_introduction_request(self, messages):
if not self._dispersy._silent:
hex_cid = self.cid.encode("HEX")
@@ -158,7 +162,8 @@ def on_introduction_request(self, messages):
ord(message.conversion.dispersy_version),
ord(message.conversion.community_version), host, port
- return super(TrackerCommunity, self).on_introduction_request(messages)
+ res = yield super(TrackerCommunity, self).on_introduction_request(messages)
+ returnValue(res)
def on_introduction_response(self, messages):
if not self._dispersy._silent:
diff --git a/twisted/plugins/tracker_plugin.py b/twisted/plugins/tracker_plugin.py
index caf65ad2..84b314d6 100644
--- a/twisted/plugins/tracker_plugin.py
+++ b/twisted/plugins/tracker_plugin.py
@@ -34,6 +34,7 @@
from twisted.application.service import IServiceMaker, MultiService
from twisted.conch import manhole_tap
from twisted.internet import reactor
+from twisted.internet.defer import returnValue, inlineCallbacks
from twisted.internet.task import LoopingCall
from twisted.logger import globalLogPublisher
from twisted.plugin import IPlugin
@@ -69,40 +70,49 @@ def __init__(self, endpoint, working_directory, silent=False, crypto=NoVerifyCry
self._silent = silent
self._my_member = None
+ @inlineCallbacks
def start(self):
assert isInIOThread()
- if super(TrackerDispersy, self).start():
- self._create_my_member()
- self._load_persistent_storage()
+ yield self.initialize_statistics()
+ tracker_started = yield super(TrackerDispersy, self).start()
+ if tracker_started:
+ yield self._create_my_member()
+ yield self._load_persistent_storage()
self.register_task("unload inactive communities",
LoopingCall(self.unload_inactive_communities)).start(COMMUNITY_CLEANUP_INTERVAL)
- self.define_auto_load(TrackerCommunity, self._my_member)
- self.define_auto_load(TrackerHardKilledCommunity, self._my_member)
+ yield self.define_auto_load(TrackerCommunity, self._my_member)
+ yield self.define_auto_load(TrackerHardKilledCommunity, self._my_member)
if not self._silent:
self._statistics_looping_call = LoopingCall(self._report_statistics)
self._statistics_looping_call.start(300)
- return True
- return False
+ returnValue(True)
+ returnValue(False)
+ @inlineCallbacks
def _create_my_member(self):
# generate a new my-member
ec = self.crypto.generate_key(u"very-low")
- self._my_member = self.get_member(private_key=self.crypto.key_to_bin(ec))
+ self._my_member = yield self.get_member(private_key=self.crypto.key_to_bin(ec))
@property
def persistent_storage_filename(self):
return self._persistent_storage_filename
+ @inlineCallbacks
def get_community(self, cid, load=False, auto_load=True):
try:
- return super(TrackerDispersy, self).get_community(cid, True, True)
+ community = yield super(TrackerDispersy, self).get_community(cid, True, True)
+ returnValue(community)
except CommunityNotFoundException:
- return TrackerCommunity.init_community(self, self.get_member(mid=cid), self._my_member)
+ member = yield self.get_member(mid=cid)
+ community = yield TrackerCommunity.init_community(self, member, self._my_member)
+ returnValue(community)
+ @inlineCallbacks
def _load_persistent_storage(self):
# load all destroyed communities
try:
@@ -115,8 +125,8 @@ def _load_persistent_storage(self):
candidate = LoopbackCandidate()
for pkt in reversed(packets):
try:
- self.on_incoming_packets([(candidate, pkt)], cache=False, timestamp=time())
- except:
+ yield self.on_incoming_packets([(candidate, pkt)], cache=False, timestamp=time())
+ except Exception:
self._logger.exception("Error while loading from persistent-destroy-community.data")
def unload_inactive_communities(self):
@@ -214,6 +224,7 @@ def makeService(self, options):
tracker_service.addService(manhole)
manhole.startService()
+ @inlineCallbacks
def run():
# setup
dispersy = TrackerDispersy(StandaloneEndpoint(options["port"],
@@ -238,7 +249,8 @@ def signal_handler(sig, frame):
signal.signal(signal.SIGTERM, signal_handler)
# start
- if not dispersy.start():
+ start_result = yield dispersy.start()
+ if not start_result:
raise RuntimeError("Unable to start Dispersy")
# wait forever
diff --git a/util.py b/util.py
index 08ab2f8d..f9a26904 100644
--- a/util.py
+++ b/util.py
@@ -13,6 +13,7 @@
from struct import unpack_from
from twisted.internet import reactor, defer
+from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.task import LoopingCall
from twisted.python import failure
from twisted.python.threadable import isInIOThread
@@ -143,12 +144,13 @@ def foo(self, bar, moo='milk'):
def helper(func):
@functools.wraps(func)
+ @inlineCallbacks
def wrapper(*args, **kargs):
return_value = None
start = time()
try:
- return_value = func(*args, **kargs)
- return return_value
+ return_value = yield func(*args, **kargs)
+ returnValue(return_value)
finally:
end = time()
entry = format_.format(function_name=func.__name__, return_value=return_value, *args, **kargs)