king: In non-daemon-mode, ^D now actually brings down the executable.

This commit is contained in:
~siprel 2020-06-06 21:33:15 +00:00
parent 45d7ac6150
commit bd51337156
6 changed files with 37 additions and 26 deletions

View File

@ -19,7 +19,7 @@ Stubbed out:
Bugs:
- [ ] In non-daemon-mode, ^D doesn't bring down Urbit properly. - [x] In non-daemon-mode, ^D doesn't bring down Urbit properly.
King-Haskell specific features:

View File

@ -25,9 +25,6 @@ import System.Posix.Internals (c_getpid)
import System.Posix.Types (CPid(..))
import System.Random (randomIO)
-- Constraints -----------------------------------------------------------------
-- KingEnv ---------------------------------------------------------------------

View File

@ -540,20 +540,25 @@ newShip CLI.New{..} opts = do
runShip :: CLI.Run -> CLI.Opts -> Bool -> TMVar () -> MultiEyreApi -> RIO KingEnv ()
runShip (CLI.Run pierPath) opts daemon vKill multi = do
tid <- io myThreadId thisTid <- io myThreadId
let onTermExit = throwTo tid UserInterrupt mStart <- newEmptyMVar
mStart <- newEmptyMVar
if daemon
then runPier mStart
else do
-- Wait until the pier has started up, then connect a terminal. If
-- the terminal ever shuts down, ask the ship to go down.
connectionThread <- async $ do
readMVar mStart
finally (connTerm pierPath) onTermExit finally (connTerm pierPath) $ do
finally (runPier mStart) (cancel connectionThread) atomically (tryPutTMVar vKill ())
-- Run the pier until it finishes, and then kill the terminal.
finally (runPier mStart) $ do
cancel connectionThread
where
runPier mStart = runPier mStart = do
runPierEnv pierConfig networkConfig $
tryPlayShip
(CLI.oExit opts)
(CLI.oFullReplay opts)
(CLI.oDryFrom opts)
@ -736,20 +741,20 @@ runSingleShip (r, o, d) multi = do
shipThread <- async (runShipNoRestart vKill r o d multi)
{-
Since `spin` never returns, this will run until the main Wait for the ship to go down.
thread is killed with an async exception. The one we expect is
`UserInterrupt` which will be raised on this thread upon SIGKILL
or SIGTERM.
Once that happens, we write to `vKill` which will cause Since `waitCatch` will never throw an exception, the `onException`
all ships to be shut down, and then we `wait` for them to finish block will only happen if this thread is killed with an async
before returning. exception. The one we expect is `UserInterrupt` which will be raised
on this thread upon SIGKILL or SIGTERM.
If this thread is killed, we first ask the ship to go down, wait
for the ship to actually go down, and then go down ourselves.
-}
let spin = forever (threadDelay maxBound) onException (void $ waitCatch shipThread) $ do
finally spin $ do
logTrace "KING IS GOING DOWN"
atomically (putTMVar vKill ()) void $ atomically $ tryPutTMVar vKill ()
waitCatch shipThread void $ waitCatch shipThread
runMultipleShips :: [(CLI.Run, CLI.Opts)] -> MultiEyreApi -> RIO KingEnv ()
@ -768,6 +773,10 @@ runMultipleShips ships multi = do
Once that happens, we write to `vKill` which will cause
all ships to be shut down, and then we `wait` for them to finish
before returning.
This is different than the single-ship flow, because ships never
go down on their own in this flow. If they go down, they just bring
themselves back up.
-}
let spin = forever (threadDelay maxBound)
finally spin $ do

View File

@ -327,9 +327,11 @@ pier (serf, log) vSlog mStart vKilled multi = do
-- bullshit scry tester -- bullshit scry tester
void $ acquireWorker "bullshit scry tester" $ forever $ do
env <- ask
threadDelay 1_000_000
wen <- io Time.now
let kal = \mTermNoun -> print ("scry result: ", mTermNoun) let kal = \mTermNoun -> runRIO env $ do
logTrace $ displayShow ("scry result: ", mTermNoun)
let nkt = MkKnot $ tshow $ Time.MkDate wen
let pax = Path ["j", "~zod", "life", nkt, "~zod"]
atomically $ putTMVar scryM (wen, Nothing, pax, kal)

View File

@ -110,7 +110,10 @@ execReplay serf log last = do
logTrace $ display $ "Replaying up to event #" <> tshow replayUpTo
logTrace $ display $ "Will replay " <> tshow numEvs <> " in total."
let onProgress n = print ("Serf is at event# " <> tshow n) env <- ask
let onProgress n = do
runRIO env $ logTrace $ display ("Serf is at event# " <> tshow n)
runResourceT
$ runConduit

View File

@ -608,7 +608,7 @@ run serf maxBatchSize getLastEvInLog onInput sendOn spin = topLoop
que <- newTBMQueueIO 1
() <- atomically (writeTBMQueue que firstWorkErr)
tWork <- async (processWork serf maxBatchSize que onWorkResp spin)
flip onException (print "KILLING: run" >> cancel tWork) $ do flip onException (cancel tWork) $ do
nexSt <- workLoop que
wait tWork
nexSt