Run tests using network-transport-inmemory #19

Merged (2 commits, Jul 6, 2015)

44 changes: 33 additions & 11 deletions distributed-process-tests.cabal
@@ -11,6 +11,10 @@ category: Control, Cloud Haskell
build-type: Simple
cabal-version: >=1.8

flag tcp
Description: build and run TCP tests
Default: False

library
exposed-modules: Network.Transport.Test
Control.Distributed.Process.Tests.CH
@@ -49,43 +53,61 @@ library
if impl(ghc <= 7.4.2)
Build-Depends: ghc-prim == 0.2.0.0

Test-Suite TestCH
Test-Suite TestCHInMemory
Type: exitcode-stdio-1.0
Main-Is: runTCP.hs
Main-Is: runInMemory.hs
CPP-Options: -DTEST_SUITE_MODULE=Control.Distributed.Process.Tests.CH
Build-Depends: base >= 4.4 && < 5,
distributed-process-tests,
network >= 2.3 && < 2.7,
network-transport >= 0.4.1.0 && < 0.5,
network-transport-tcp >= 0.3 && < 0.5,
network-transport-inmemory >= 0.5,
test-framework >= 0.6 && < 0.9
Extensions: CPP
ghc-options: -Wall -threaded -debug -eventlog -rtsopts -with-rtsopts=-N -fno-warn-unused-do-bind
HS-Source-Dirs: tests

Test-Suite TestClosure
Test-Suite TestCHInTCP
Type: exitcode-stdio-1.0
Main-Is: runTCP.hs
CPP-Options: -DTEST_SUITE_MODULE=Control.Distributed.Process.Tests.CH
if flag(tcp)
Build-Depends: base >= 4.4 && < 5,
distributed-process-tests,
network >= 2.3 && < 2.7,
network-transport >= 0.4.1.0 && < 0.5,
network-transport-tcp >= 0.3 && < 0.5,
test-framework >= 0.6 && < 0.9
else
Buildable: False
Extensions: CPP
ghc-options: -Wall -threaded -debug -eventlog -rtsopts -with-rtsopts=-N -fno-warn-unused-do-bind
HS-Source-Dirs: tests


Test-Suite TestClosure
Type: exitcode-stdio-1.0
Main-Is: runInMemory.hs
CPP-Options: -DTEST_SUITE_MODULE=Control.Distributed.Process.Tests.Closure
Build-Depends: base >= 4.4 && < 5,
distributed-process-tests,
network >= 2.3 && < 2.7,
network-transport >= 0.4.1.0 && < 0.5,
network-transport-tcp >= 0.3 && < 0.5,
network-transport-inmemory >= 0.5,
test-framework >= 0.6 && < 0.9
Extensions: CPP
ghc-options: -Wall -threaded -rtsopts -with-rtsopts=-N -fno-warn-unused-do-bind
HS-Source-Dirs: tests

Test-Suite TestStats
Type: exitcode-stdio-1.0
Main-Is: runTCP.hs
Main-Is: runInMemory.hs
CPP-Options: -DTEST_SUITE_MODULE=Control.Distributed.Process.Tests.Stats
Build-Depends: base >= 4.4 && < 5,
distributed-process-tests,
network >= 2.3 && < 2.7,
network-transport >= 0.4.1.0 && < 0.5,
network-transport-tcp >= 0.3 && < 0.5,
network-transport-inmemory >= 0.5,
test-framework >= 0.6 && < 0.9
Extensions: CPP
ghc-options: -Wall -debug -eventlog -threaded -rtsopts -with-rtsopts=-N -fno-warn-unused-do-bind
@@ -94,27 +116,27 @@ Test-Suite TestStats

Test-Suite TestMx
Type: exitcode-stdio-1.0
Main-Is: runTCP.hs
Main-Is: runInMemory.hs
CPP-Options: -DTEST_SUITE_MODULE=Control.Distributed.Process.Tests.Mx
Build-Depends: base >= 4.4 && < 5,
distributed-process-tests,
network >= 2.3 && < 2.7,
network-transport >= 0.4.1.0 && < 0.5,
network-transport-tcp >= 0.3 && < 0.5,
network-transport-inmemory >= 0.5,
test-framework >= 0.6 && < 0.9
Extensions: CPP
ghc-options: -Wall -debug -threaded -rtsopts -with-rtsopts=-N -fno-warn-unused-do-bind
HS-Source-Dirs: tests

Test-Suite TestTracing
Type: exitcode-stdio-1.0
Main-Is: runTCP.hs
Main-Is: runInMemory.hs
CPP-Options: -DTEST_SUITE_MODULE=Control.Distributed.Process.Tests.Tracing
Build-Depends: base >= 4.4 && < 5,
distributed-process-tests,
network >= 2.3 && < 2.7,
network-transport >= 0.4.1.0 && < 0.5,
network-transport-tcp >= 0.3 && < 0.5,
network-transport-inmemory >= 0.5,
test-framework >= 0.6 && < 0.9
Extensions: CPP
ghc-options: -Wall -debug -eventlog -threaded -rtsopts -with-rtsopts=-N -fno-warn-unused-do-bind
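
The new TestCHInTCP suite is gated behind the tcp flag, which defaults to False, so a plain build compiles only the in-memory-based suites. To exercise the TCP suite locally you would enable the flag explicitly, for example with cabal configure --enable-tests -ftcp followed by cabal test TestCHInTCP (assuming a cabal-install workflow; the exact invocation depends on the build setup).
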
39 changes: 32 additions & 7 deletions src/Control/Distributed/Process/Tests/CH.hs
@@ -21,7 +21,7 @@ import Control.Monad (replicateM_, replicateM, forever, void, unless)
import Control.Exception (SomeException, throwIO)
import qualified Control.Exception as Ex (catch)
import Control.Applicative ((<$>), (<*>), pure, (<|>))
import qualified Network.Transport as NT (closeEndPoint)
import qualified Network.Transport as NT (closeEndPoint, EndPointAddress)
import Control.Distributed.Process
import Control.Distributed.Process.Internal.Types
( NodeId(nodeAddress)
@@ -302,19 +302,25 @@ testMonitorRemoteDeadProcess TestTransport{..} mOrL un = do
testMonitorDisconnect :: TestTransport -> Bool -> Bool -> Assertion
testMonitorDisconnect TestTransport{..} mOrL un = do
processAddr <- newEmptyMVar
processAddr2 <- newEmptyMVar
monitorSetup <- newEmptyMVar
done <- newEmptyMVar

forkIO $ do
localNode <- newLocalNode testTransport initRemoteTable
addr <- forkProcess localNode . liftIO $ threadDelay 1000000
addr <- forkProcess localNode $ expect
addr2 <- forkProcess localNode $ return ()
putMVar processAddr addr
readMVar monitorSetup
NT.closeEndPoint (localEndPoint localNode)
putMVar processAddr2 addr2

forkIO $ do
localNode <- newLocalNode testTransport initRemoteTable
theirAddr <- readMVar processAddr
forkProcess localNode $ do
lc <- liftIO $ readMVar processAddr2
send lc ()
runProcess localNode $ do
monitorTestProcess theirAddr mOrL un DiedDisconnect (Just monitorSetup) done

@@ -583,17 +589,22 @@ testMonitorLiveNode :: TestTransport -> Assertion
testMonitorLiveNode TestTransport{..} = do
[node1, node2] <- replicateM 2 $ newLocalNode testTransport initRemoteTable
ready <- newEmptyMVar
readyr <- newEmptyMVar
done <- newEmptyMVar

p <- forkProcess node1 $ return ()
forkProcess node2 $ do
ref <- monitorNode (localNodeId node1)
liftIO $ putMVar ready ()
liftIO $ takeMVar readyr
send p ()
NodeMonitorNotification ref' nid _ <- expect
True <- return $ ref == ref' && nid == localNodeId node1
liftIO $ putMVar done ()

takeMVar ready
closeLocalNode node1
putMVar readyr ()

takeMVar done

@@ -688,7 +699,6 @@ testReconnect :: TestTransport -> Assertion
testReconnect TestTransport{..} = do
[node1, node2] <- replicateM 2 $ newLocalNode testTransport initRemoteTable
let nid1 = localNodeId node1
nid2 = localNodeId node2
processA <- newEmptyMVar
[sendTestOk, registerTestOk] <- replicateM 2 newEmptyMVar

@@ -709,7 +719,8 @@ testReconnect TestTransport{..} = do
send them "message 1" >> liftIO (threadDelay 100000)

-- Simulate network failure
liftIO $ testBreakConnection (nodeAddress nid1) (nodeAddress nid2)
liftIO $ syncBreakConnection testBreakConnection node1 node2


-- Should not arrive
send them "message 2"
@@ -734,7 +745,7 @@ testReconnect TestTransport{..} = do


-- Simulate network failure
liftIO $ testBreakConnection (nodeAddress nid1) (nodeAddress nid2)
liftIO $ syncBreakConnection testBreakConnection node1 node2

-- This will happen due to implicit reconnect
registerRemoteAsync nid1 "b" us
@@ -1320,7 +1331,7 @@ testUnsafeSendChan TestTransport{..} = do

tests :: TestTransport -> IO [Test]
tests testtrans = return [
testGroup "Basic features" [
testGroup "Basic features" [
testCase "Ping" (testPing testtrans)
, testCase "Math" (testMath testtrans)
, testCase "Timeout" (testTimeout testtrans)
@@ -1356,7 +1367,7 @@ tests testtrans = return [
-- usend
, testCase "USend" (testUSend testtrans 50)
]
, testGroup "Monitoring and Linking" [
, testGroup "Monitoring and Linking" [
-- Monitoring processes
--
-- The "missing" combinations in the list below don't make much sense, as
@@ -1388,3 +1399,17 @@ tests testtrans = return [
, testCase "Reconnect" (testReconnect testtrans)
]
]

syncBreakConnection :: (NT.EndPointAddress -> NT.EndPointAddress -> IO ()) -> LocalNode -> LocalNode -> IO ()
syncBreakConnection breakConnection nid0 nid1 = do
m <- newEmptyMVar
_ <- forkProcess nid1 $ getSelfPid >>= liftIO . putMVar m
runProcess nid0 $ do
them <- liftIO $ takeMVar m
pinger <- spawnLocal $ forever $ send them ()
_ <- monitorNode (localNodeId nid1)
liftIO $ breakConnection (nodeAddress $ localNodeId nid0)
(nodeAddress $ localNodeId nid1)
NodeMonitorNotification _ _ _ <- expect
kill pinger "finished"
return ()
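
The syncBreakConnection helper makes the simulated network failure observable before the tests continue: testBreakConnection only requests the break, so the helper forks a throwaway process on the second node, keeps sending to it from a local pinger, monitors that node, and returns only after the NodeMonitorNotification for the broken connection arrives. testReconnect now calls it instead of invoking testBreakConnection directly, which removes the race between breaking the connection and the sends that follow.
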
31 changes: 31 additions & 0 deletions tests/runInMemory.hs
@@ -0,0 +1,31 @@
-- Run tests using the in-memory transport.

module Main where

import TEST_SUITE_MODULE (tests)

import Network.Transport.Test (TestTransport(..))
import Network.Transport.InMemory
import Test.Framework (defaultMainWithArgs)

import Control.Concurrent (threadDelay)
import System.Environment (getArgs)

main :: IO ()
main = do
(transport, internals) <- createTransportExposeInternals
ts <- tests TestTransport
{ testTransport = transport
, testBreakConnection = \addr1 addr2 -> breakConnection internals addr1 addr2 "user error"
}
args <- getArgs
-- Tests are time sensitive. Running the tests concurrently can slow them
-- down enough that threads using threadDelay would wake up later than
-- expected, thus changing the order in which messages were expected.
-- Therefore we run the tests sequentially by passing "-j 1" to
-- test-framework. This does not solve the issue but makes it less likely.
--
-- The problem was first detected with
-- 'Control.Distributed.Process.Tests.CH.testMergeChannels'
-- in particular.
defaultMainWithArgs ts ("-j" : "1" : args)
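
Each suite reuses this single Main: the cabal file passes -DTEST_SUITE_MODULE=<module> for every test suite and the CPP extension rewrites the import at the top, so the same runner drives the CH, Closure, Stats, Mx and Tracing test modules. Any extra test-framework arguments supplied on the command line are forwarded after the forced -j 1. The TestTransport record the runner fills in comes from Network.Transport.Test; as a rough sketch of the shape implied by the usage above (field names taken from this file; the real definition may carry more):

    import qualified Network.Transport as NT

    -- Sketch only; the authoritative definition lives in Network.Transport.Test.
    data TestTransport = TestTransport
      { testTransport       :: NT.Transport
        -- ^ transport on which the test nodes are created
      , testBreakConnection :: NT.EndPointAddress -> NT.EndPointAddress -> IO ()
        -- ^ simulate a network failure between two endpoint addresses
      }
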
2 changes: 1 addition & 1 deletion tests/runTCP.hs
@@ -1,5 +1,5 @@
-- Run tests using the TCP transport.

--
module Main where

import TEST_SUITE_MODULE (tests)