diff --git a/haskoin-core/Setup.hs b/Setup.hs similarity index 100% rename from haskoin-core/Setup.hs rename to Setup.hs diff --git a/haskoin-core/UNLICENSE b/UNLICENSE similarity index 100% rename from haskoin-core/UNLICENSE rename to UNLICENSE diff --git a/haskoin-core/haskoin-core.cabal b/haskoin-core.cabal similarity index 100% rename from haskoin-core/haskoin-core.cabal rename to haskoin-core.cabal diff --git a/haskoin-core/.stylish-haskell.yaml b/haskoin-core/.stylish-haskell.yaml deleted file mode 100644 index 818761f6..00000000 --- a/haskoin-core/.stylish-haskell.yaml +++ /dev/null @@ -1,164 +0,0 @@ -# stylish-haskell configuration file -# ================================== - -# The stylish-haskell tool is mainly configured by specifying steps. These steps -# are a list, so they have an order, and one specific step may appear more than -# once (if needed). Each file is processed by these steps in the given order. -steps: - # Convert some ASCII sequences to their Unicode equivalents. This is disabled - # by default. - # - unicode_syntax: - # # In order to make this work, we also need to insert the UnicodeSyntax - # # language pragma. If this flag is set to true, we insert it when it's - # # not already present. You may want to disable it if you configure - # # language extensions using some other method than pragmas. Default: - # # true. - # add_language_pragma: true - - # Align the right hand side of some elements. This is quite conservative - # and only applies to statements where each element occupies a single - # line. - - simple_align: - cases: true - top_level_patterns: true - records: true - - # Import cleanup - - imports: - # There are different ways we can align names and lists. - # - # - global: Align the import names and import list throughout the entire - # file. - # - # - file: Like global, but don't add padding when there are no qualified - # imports in the file. - # - # - group: Only align the imports per group (a group is formed by adjacent - # import lines). - # - # - none: Do not perform any alignment. - # - # Default: global. - align: global - - # Folowing options affect only import list alignment. - # - # List align has following options: - # - # - after_alias: Import list is aligned with end of import including - # 'as' and 'hiding' keywords. - # - # > import qualified Data.List as List (concat, foldl, foldr, head, - # > init, last, length) - # - # - with_alias: Import list is aligned with start of alias or hiding. - # - # > import qualified Data.List as List (concat, foldl, foldr, head, - # > init, last, length) - # - # - new_line: Import list starts always on new line. - # - # > import qualified Data.List as List - # > (concat, foldl, foldr, head, init, last, length) - # - # Default: after alias - list_align: after_alias - - # Long list align style takes effect when import is too long. This is - # determined by 'columns' setting. - # - # - inline: This option will put as much specs on same line as possible. - # - # - new_line: Import list will start on new line. - # - # - new_line_multiline: Import list will start on new line when it's - # short enough to fit to single line. Otherwise it'll be multiline. - # - # - multiline: One line per import list entry. - # Type with contructor list acts like single import. - # - # > import qualified Data.Map as M - # > ( empty - # > , singleton - # > , ... - # > , delete - # > ) - # - # Default: inline - long_list_align: inline - - # List padding determines indentation of import list on lines after import. 
- # This option affects 'list_align' and 'long_list_align'. - list_padding: 4 - - # Separate lists option affects formating of import list for type - # or class. The only difference is single space between type and list - # of constructors, selectors and class functions. - # - # - true: There is single space between Foldable type and list of it's - # functions. - # - # > import Data.Foldable (Foldable (fold, foldl, foldMap)) - # - # - false: There is no space between Foldable type and list of it's - # functions. - # - # > import Data.Foldable (Foldable(fold, foldl, foldMap)) - # - # Default: true - separate_lists: true - - # Language pragmas - - language_pragmas: - # We can generate different styles of language pragma lists. - # - # - vertical: Vertical-spaced language pragmas, one per line. - # - # - compact: A more compact style. - # - # - compact_line: Similar to compact, but wrap each line with - # `{-#LANGUAGE #-}'. - # - # Default: vertical. - style: vertical - - # Align affects alignment of closing pragma brackets. - # - # - true: Brackets are aligned in same collumn. - # - # - false: Brackets are not aligned together. There is only one space - # between actual import and closing bracket. - # - # Default: true - align: true - - # stylish-haskell can detect redundancy of some language pragmas. If this - # is set to true, it will remove those redundant pragmas. Default: true. - remove_redundant: true - - # Replace tabs by spaces. This is disabled by default. - # - tabs: - # # Number of spaces to use for each tab. Default: 8, as specified by the - # # Haskell report. - # spaces: 8 - - # Remove trailing whitespace - - trailing_whitespace: {} - -# A common setting is the number of columns (parts of) code will be wrapped -# to. Different steps take this into account. Default: 80. -columns: 80 - -# Sometimes, language extensions are specified in a cabal file or from the -# command line instead of using language pragmas in the file. stylish-haskell -# needs to be aware of these, so it can parse the file correctly. -# -# No language extensions are enabled by default. -language_extensions: - - DeriveDataTypeable - - EmptyDataDecls - - FlexibleContexts - - FlexibleInstances - - GADTs - - OverloadedStrings - - RecordWildCards diff --git a/haskoin-node/Setup.hs b/haskoin-node/Setup.hs deleted file mode 100644 index 9a994af6..00000000 --- a/haskoin-node/Setup.hs +++ /dev/null @@ -1,2 +0,0 @@ -import Distribution.Simple -main = defaultMain diff --git a/haskoin-node/UNLICENSE b/haskoin-node/UNLICENSE deleted file mode 100644 index 68a49daa..00000000 --- a/haskoin-node/UNLICENSE +++ /dev/null @@ -1,24 +0,0 @@ -This is free and unencumbered software released into the public domain. - -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. - -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -For more information, please refer to diff --git a/haskoin-node/haskoin-node.cabal b/haskoin-node/haskoin-node.cabal deleted file mode 100644 index c6b848d7..00000000 --- a/haskoin-node/haskoin-node.cabal +++ /dev/null @@ -1,111 +0,0 @@ -name: haskoin-node -version: 0.4.3 -synopsis: - Implementation of a Bitoin node. -description: - haskoin-node provides an implementation of the Bitcoin network protocol - that allows you to synchronize headers (with SPV validation) and download - merkle blocks and full blocks. This package can be used to implement - wallets or other Bitcoin components that require talking to the Bitcoin - network. It provides the following features: - . - * Implementation of the Bitcoin network protocol - * Headertree implementation with SPV verification - * Headers-first synchronization - * Merkle block download from peers with bloom filters - * Full block download from peers - . - A wallet implementation using this package is available in haskoin-wallet. - -homepage: http://github.com/haskoin/haskoin -bug-reports: http://github.com/haskoin/haskoin/issues -tested-with: GHC==8.0.2 -stability: stable -license: PublicDomain -license-file: UNLICENSE -author: Philippe Laprade, Jean-Pierre Rupp -maintainer: xenog@protonmail.com -category: Bitcoin, Finance, Network -build-type: Simple -cabal-version: >= 1.9.2 - -source-repository head - type: git - location: git://github.com/haskoin/haskoin.git - -library - hs-source-dirs: src - exposed-modules: Network.Haskoin.Node.HeaderTree - Network.Haskoin.Node.Checkpoints - Network.Haskoin.Node.Peer - Network.Haskoin.Node.BlockChain - Network.Haskoin.Node.STM - other-modules: Network.Haskoin.Node.HeaderTree.Types - Network.Haskoin.Node.HeaderTree.Model - - extensions: OverloadedStrings - FlexibleInstances - FlexibleContexts - RecordWildCards - DeriveDataTypeable - - build-depends: aeson >= 1.2 && < 1.3 - , async >= 2.0 && < 2.2 - , base >= 4.8 && < 5 - , bytestring >= 0.10 && < 0.11 - , concurrent-extra >= 0.7 && < 0.8 - , cereal >= 0.5 && < 0.6 - , conduit >= 1.2 && < 1.3 - , conduit-extra >= 1.2 && < 1.3 - , containers >= 0.5 && < 0.6 - , data-default >= 0.5 && < 0.8 - , deepseq >= 1.4 && < 1.5 - , either >= 4.5 && < 4.6 - , esqueleto >= 2.4 && < 2.6 - , exceptions >= 0.8 && < 0.9 - , haskoin-core >= 0.3 && < 0.5 - , largeword >= 1.2.4 && < 1.3 - , lifted-async >= 0.2 && < 0.10 - , lifted-base >= 0.2 && < 0.3 - , monad-control >= 1.0 && < 1.1 - , monad-logger >= 0.3 && < 0.4 - , mtl >= 2.2 && < 2.3 - , network >= 2.4 && < 2.7 - , persistent >= 2.7 && < 2.8 - , persistent-template >= 2.5 && < 2.6 - , resource-pool >= 0.2 && < 0.3 - , random >= 1.0 && < 1.2 - , stm >= 2.4 && < 2.5 - , stm-chans >= 3.0 && < 3.1 - , stm-conduit >= 2.5 && < 3.1 - , string-conversions >= 0.4 && < 0.5 - , text >= 0.11 && < 1.3 - , time >= 1.8 && < 1.9 - - ghc-options: -Wall - -test-suite test-haskoin-node - type: exitcode-stdio-1.0 - main-is: Main.hs - - extensions: EmptyDataDecls - - other-modules: Network.Haskoin.Node.Tests - Network.Haskoin.Node.Units - - build-depends: base >= 4.8 && < 5 - , haskoin-core - , 
haskoin-node - , HUnit >= 1.6 && < 1.7 - , QuickCheck >= 2.10 && < 2.11 - , monad-logger >= 0.3 && < 0.4 - , mtl >= 2.2 && < 2.3 - , persistent >= 2.7 && < 2.8 - , persistent-sqlite >= 2.6 && < 2.7 - , resourcet >= 1.1 && < 1.2 - , test-framework >= 0.8 && < 0.9 - , test-framework-quickcheck2 >= 0.3 && < 0.4 - , test-framework-hunit >= 0.3 && < 0.4 - - ghc-options: -Wall - hs-source-dirs: test diff --git a/haskoin-node/src/Network/Haskoin/Node/BlockChain.hs b/haskoin-node/src/Network/Haskoin/Node/BlockChain.hs deleted file mode 100644 index aaae8f1e..00000000 --- a/haskoin-node/src/Network/Haskoin/Node/BlockChain.hs +++ /dev/null @@ -1,713 +0,0 @@ -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE TemplateHaskell #-} -module Network.Haskoin.Node.BlockChain where - -import Control.Concurrent (threadDelay) -import Control.Concurrent.Async.Lifted (link, mapConcurrently, - waitAnyCancel, withAsync) -import Control.Concurrent.STM (STM, atomically, isEmptyTMVar, - putTMVar, readTVar, retry, - takeTMVar, tryReadTMVar, - tryTakeTMVar) -import Control.Concurrent.STM.Lock (locked) -import qualified Control.Concurrent.STM.Lock as Lock (with) -import Control.Concurrent.STM.TBMChan (isEmptyTBMChan, readTBMChan) -import Control.Exception.Lifted (throw) -import Control.Monad (forM, forM_, forever, unless, - void, when) -import Control.Monad.Logger (MonadLoggerIO, logDebug, - logError, logInfo, logWarn) -import Control.Monad.Reader (ask, asks) -import Control.Monad.Trans (MonadIO, lift, liftIO) -import Control.Monad.Trans.Control (MonadBaseControl, liftBaseOp_) -import qualified Data.ByteString.Char8 as C (unpack) -import Data.Conduit (Source, yield) -import Data.List (nub) -import qualified Data.Map as M (delete, keys, lookup, - null) -import Data.Maybe (listToMaybe) -import qualified Data.Sequence as S (length) -import Data.String.Conversions (cs) -import Data.Text (pack) -import Data.Time.Clock.POSIX (getPOSIXTime) -import Data.Unique (hashUnique) -import Data.Word (Word32) -import Network.Haskoin.Block -import Network.Haskoin.Network -import Network.Haskoin.Node.HeaderTree -import Network.Haskoin.Node.Peer -import Network.Haskoin.Node.STM -import Network.Haskoin.Transaction -import System.Random (randomIO) - -startSPVNode :: (MonadLoggerIO m, MonadBaseControl IO m) - => [PeerHost] - -> BloomFilter - -> Int - -> NodeT m () -startSPVNode hosts bloom elems = do - $(logDebug) "Setting our bloom filter in the node" - atomicallyNodeT $ sendBloomFilter bloom elems - $(logDebug) $ pack $ unwords - [ "Starting SPV node with", show $ length hosts, "hosts" ] - withAsync (void $ mapConcurrently startReconnectPeer hosts) $ \a1 -> do - link a1 - $(logInfo) "Starting the initial header sync" - headerSync - $(logInfo) "Initial header sync complete" - $(logDebug) "Starting the tickle processing thread" - withAsync processTickles $ \a2 -> link a2 >> do - _ <- liftIO $ waitAnyCancel [a1, a2] - return () - $(logDebug) "Exiting SPV-node thread" - --- Source of all transaction broadcasts -txSource :: (MonadLoggerIO m, MonadBaseControl IO m) - => Source (NodeT m) Tx -txSource = do - chan <- lift $ asks sharedTxChan - $(logDebug) "Waiting to receive a transaction..." 
- resM <- liftIO $ atomically $ readTBMChan chan - case resM of - Just (pid, ph, tx) -> do - $(logInfo) $ formatPid pid ph $ unwords - [ "Received transaction broadcast", cs $ txHashToHex $ txHash tx ] - yield tx >> txSource - _ -> $(logError) "Tx channel closed unexpectedly" - -handleGetData :: (MonadLoggerIO m, MonadBaseControl IO m) - => (TxHash -> m (Maybe Tx)) - -> NodeT m () -handleGetData handler = forever $ do - $(logDebug) "Waiting for GetData transaction requests..." - -- Wait for tx GetData requests to be available - txids <- atomicallyNodeT $ do - datMap <- readTVarS sharedTxGetData - if M.null datMap then lift retry else return $ M.keys datMap - forM (nub txids) $ \tid -> lift (handler tid) >>= \txM -> do - $(logDebug) $ pack $ unwords - [ "Processing GetData txid request", cs $ txHashToHex tid ] - pidsM <- atomicallyNodeT $ do - datMap <- readTVarS sharedTxGetData - writeTVarS sharedTxGetData $ M.delete tid datMap - return $ M.lookup tid datMap - case (txM, pidsM) of - -- Send the transaction to the required peers - (Just tx, Just pids) -> forM_ pids $ \(pid, ph) -> do - $(logDebug) $ formatPid pid ph $ unwords - [ "Sending tx", cs $ txHashToHex tid, "to peer" ] - atomicallyNodeT $ trySendMessage pid $ MTx tx - _ -> return () - -broadcastTxs :: (MonadLoggerIO m, MonadBaseControl IO m) - => [TxHash] - -> NodeT m () -broadcastTxs txids = do - forM_ txids $ \tid -> $(logInfo) $ pack $ unwords - [ "Transaction INV broadcast:", cs $ txHashToHex tid ] - -- Broadcast an INV message for new transactions - let msg = MInv $ Inv $ map (InvVector InvTx . getTxHash) txids - atomicallyNodeT $ sendMessageAll msg - -rescanTs :: Timestamp -> NodeT STM () -rescanTs ts = do - rescanTMVar <- asks sharedRescan - lift $ do - -- Make sure the TMVar is empty - _ <- tryTakeTMVar rescanTMVar - putTMVar rescanTMVar $ Left ts - -rescanHeight :: BlockHeight -> NodeT STM () -rescanHeight h = do - rescanTMVar <- asks sharedRescan - lift $ do - -- Make sure the TMVar is empty - _ <- tryTakeTMVar rescanTMVar - putTMVar rescanTMVar $ Right h - --- Wait for the next merkle batch to be available. This function will check for --- rescans. -merkleDownload - :: (MonadLoggerIO m, MonadBaseControl IO m) - => BlockHash - -> Word32 - -> NodeT m - (BlockChainAction, Source (NodeT m) (Either (MerkleBlock, MerkleTxs) Tx)) -merkleDownload walletHash batchSize = do - -- Store the best block received from the wallet for information only. - -- This will be displayed in `hw status` - merkleSyncedActions walletHash - walletBlockM <- runSqlNodeT $ getBlockByHash walletHash - walletBlock <- case walletBlockM of - Just walletBlock -> do - atomicallyNodeT $ writeTVarS sharedBestBlock walletBlock - return walletBlock - Nothing -> - error "Could not find wallet best block in headers" - rescanTMVar <- asks sharedRescan - -- Wait either for a new block to arrive or a rescan to be triggered - $(logDebug) "Waiting for a new block or a rescan..." 
- resE <- atomicallyNodeT $ orElseNodeT - (fmap Left $ lift $ takeTMVar rescanTMVar) - (const (Right ()) <$> waitNewBlock walletHash) - resM <- case resE of - -- A rescan was triggered - Left valE -> do - $(logInfo) $ pack $ unwords - [ "Got rescan request", show valE ] - -- Wait until rescan conditions are met - newValE <- waitRescan rescanTMVar valE - $(logDebug) $ pack $ unwords - [ "Rescan condition reached:", show newValE ] - case newValE of - Left ts -> tryMerkleDwnTimestamp ts batchSize - Right _ -> tryMerkleDwnHeight walletBlock batchSize - -- Continue download from a hash - Right _ -> tryMerkleDwnBlock walletBlock batchSize - case resM of - Just res -> return res - _ -> do - $(logWarn) "Invalid merkleDownload result. Retrying ..." - -- Sleep 10 seconds and retry - liftIO $ threadDelay $ 10*1000000 - merkleDownload walletHash batchSize - where - waitRescan rescanTMVar valE = do - resE <- atomicallyNodeT $ orElseNodeT - (fmap Left (lift $ takeTMVar rescanTMVar)) - (waitVal valE >> return (Right valE)) - case resE of - Left newValE -> waitRescan rescanTMVar newValE - Right res -> return res - waitVal valE = case valE of - Left ts -> waitFastCatchup ts - Right h -> waitHeight h - --- | Perform some actions only when headers have been synced. -merkleSyncedActions - :: (MonadLoggerIO m, MonadBaseControl IO m) - => BlockHash -- ^ Wallet best block - -> NodeT m () -merkleSyncedActions walletHash = - asks sharedSyncLock >>= \lock -> liftBaseOp_ (Lock.with lock) $ do - -- Check if we are synced - (synced, mempool, header) <- atomicallyNodeT $ do - header <- readTVarS sharedBestHeader - synced <- areBlocksSynced walletHash - mempool <- readTVarS sharedMempool - return (synced, mempool, header) - when synced $ do - $(logInfo) $ pack $ unwords - [ "Merkle blocks are in sync with the" - , "network at height", show walletHash - ] - -- Prune side chains - bestBlock <- runSqlNodeT $ pruneChain header - atomicallyNodeT $ do - -- Update shared best header after pruning - writeTVarS sharedBestHeader bestBlock - writeTVarS sharedMerklePeer Nothing - -- Do a mempool sync on the first merkle sync - unless mempool $ do - atomicallyNodeT $ do - sendMessageAll MMempool - writeTVarS sharedMempool True - $(logInfo) "Requesting a mempool sync" - --- Wait for headers to catch up to the given height -waitHeight :: BlockHeight -> NodeT STM () -waitHeight height = do - node <- readTVarS sharedBestHeader - -- Check if we passed the timestamp condition - unless (height < nodeBlockHeight node) $ lift retry - --- Wait for headers to catch up to the given timestamp -waitFastCatchup :: Timestamp -> NodeT STM () -waitFastCatchup ts = do - node <- readTVarS sharedBestHeader - -- Check if we passed the timestamp condition - unless (ts < blockTimestamp (nHeader node)) $ - lift retry - --- Wait for a new block to be available for download -waitNewBlock :: BlockHash -> NodeT STM () -waitNewBlock bh = do - node <- readTVarS sharedBestHeader - -- We have more merkle blocks to download - unless (bh /= nodeHash node) $ - lift retry - -tryMerkleDwnHeight - :: (MonadLoggerIO m, MonadBaseControl IO m) - => NodeBlock - -> Word32 - -> NodeT m (Maybe (BlockChainAction, - Source (NodeT m) (Either (MerkleBlock, MerkleTxs) Tx))) -tryMerkleDwnHeight block batchSize = do - $(logInfo) $ pack $ unwords - [ "Requesting merkle blocks at height", show $ nodeBlockHeight block - , "with batch size", show batchSize - ] - -- Request height - 1 as we want to start downloading at height - nodeM <- runSqlNodeT $ getParentBlock block - case nodeM of - 
Just bn -> - tryMerkleDwnBlock bn batchSize - _ -> do - $(logDebug) $ pack $ unwords - [ "Can't download merkle blocks." - , "Waiting for headers to sync ..." - ] - return Nothing - -tryMerkleDwnTimestamp - :: (MonadLoggerIO m, MonadBaseControl IO m) - => Timestamp - -> Word32 - -> NodeT m (Maybe (BlockChainAction, - Source (NodeT m) (Either (MerkleBlock, MerkleTxs) Tx))) -tryMerkleDwnTimestamp ts batchSize = do - $(logInfo) $ pack $ unwords - [ "Requesting merkle blocks after timestamp", show ts - , "with batch size", show batchSize - ] - nodeM <- runSqlNodeT $ getBlockAfterTime ts - case nodeM of - Just bh -> - tryMerkleDwnBlock bh batchSize - _ -> do - $(logDebug) $ pack $ unwords - [ "Can't download merkle blocks." - , "Waiting for headers to sync ..." - ] - return Nothing - -tryMerkleDwnBlock - :: (MonadLoggerIO m, MonadBaseControl IO m) - => NodeBlock - -> Word32 - -> NodeT m (Maybe (BlockChainAction, - Source (NodeT m) (Either (MerkleBlock, MerkleTxs) Tx))) -tryMerkleDwnBlock bh batchSize = do - $(logDebug) $ pack $ unwords - [ "Requesting merkle download from block" - , cs $ blockHashToHex (nodeHash bh) - , "and batch size", show batchSize - ] - -- Get the list of merkle blocks to download from our headers - best <- atomicallyNodeT $ readTVarS sharedBestHeader - action <- runSqlNodeT $ getBlockWindow best bh batchSize - case actionNodes action of - [] -> do - $(logError) "BlockChainAction was empty" - return Nothing - ns -> do - -- Wait for a peer available for merkle download - (pid, PeerSession{..}) <- waitMerklePeer $ - nodeBlockHeight $ last ns - - $(logDebug) $ formatPid pid peerSessionHost $ unwords - [ "Found merkle downloading peer with score" - , show peerSessionScore - ] - - let source = peerMerkleDownload pid peerSessionHost action - return $ Just (action, source) - where - waitMerklePeer height = atomicallyNodeT $ do - pidM <- readTVarS sharedHeaderPeer - allPeers <- getPeersAtHeight (>= height) - let f (pid,_) = Just pid /= pidM - -- Filter out the peer syncing headers (if there is one) - peers = filter f allPeers - case listToMaybe peers of - Just res@(pid,_) -> do - writeTVarS sharedMerklePeer $ Just pid - return res - _ -> lift retry - -peerMerkleDownload - :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerId - -> PeerHost - -> BlockChainAction - -> Source (NodeT m) (Either (MerkleBlock, MerkleTxs) Tx) -peerMerkleDownload pid ph action = do - let bids = map nodeHash $ actionNodes action - vs = map (InvVector InvMerkleBlock . getBlockHash) bids - $(logInfo) $ formatPid pid ph $ unwords - [ "Requesting", show $ length bids, "merkle block(s)" ] - nonce <- liftIO randomIO - -- Request a merkle batch download - sessM <- lift . atomicallyNodeT $ do - _ <- trySendMessage pid $ MGetData $ GetData vs - -- Send a ping to have a recognizable end message for - -- the last merkle block download. - _ <- trySendMessage pid $ MPing $ Ping nonce - tryGetPeerSession pid - case sessM of - Just PeerSession{..} -> checkOrder peerSessionMerkleChan bids - _ -> lift . atomicallyNodeT $ - writeTVarS sharedMerklePeer Nothing - where - -- Build a source that that will check the order of the received merkle - -- blocks against the initial request. If merkle blocks are sent out of - -- order, the source will close and the peer will be flagged as - -- misbehaving. The source will also close once all the requested merkle - -- blocks have been received from the peer. - checkOrder _ [] = lift . 
atomicallyNodeT $ - writeTVarS sharedMerklePeer Nothing - checkOrder chan (bid:bids) = do - -- Read the channel or disconnect the peer after waiting for 2 minutes - resM <- lift $ raceTimeout 120 - (disconnectPeer pid ph) - (liftIO . atomically $ readTBMChan chan) - case resM of - -- Forward transactions - Right (Just res@(Right _)) -> - yield res >> checkOrder chan (bid:bids) - Right (Just res@(Left (MerkleBlock mHead _ _ _, _))) -> do - let mBid = headerHash mHead - $(logDebug) $ formatPid pid ph $ unwords - [ "Processing merkle block", cs $ blockHashToHex mBid ] - -- Check if we were expecting this merkle block - if mBid == bid - then yield res >> checkOrder chan bids - else lift $ do - atomicallyNodeT $ writeTVarS sharedMerklePeer Nothing - -- If we were not expecting this merkle block, do not - -- yield the merkle block and close the source - misbehaving pid ph moderateDoS $ unwords - [ "Peer sent us merkle block hash" - , cs $ blockHashToHex $ headerHash mHead - , "but we expected merkle block hash" - , cs $ blockHashToHex bid - ] - -- Not sure how to recover from this situation. - -- Disconnect the peer. TODO: Is there a way to recover - -- without buffering the whole batch in memory and - -- re-order it? - disconnectPeer pid ph - -- The channel closed. Stop here. - _ -> do - $(logWarn) $ formatPid pid ph - "Merkle channel closed unexpectedly" - lift $ atomicallyNodeT $ writeTVarS sharedMerklePeer Nothing - -processTickles :: (MonadLoggerIO m, MonadBaseControl IO m) - => NodeT m () -processTickles = forever $ do - $(logDebug) $ pack "Waiting for a block tickle ..." - (pid, ph, tickle) <- atomicallyNodeT waitTickle - $(logInfo) $ formatPid pid ph $ unwords - [ "Received block tickle", cs $ blockHashToHex tickle ] - heightM <- fmap nodeBlockHeight <$> runSqlNodeT (getBlockByHash tickle) - case heightM of - Just height -> do - $(logInfo) $ formatPid pid ph $ unwords - [ "The block tickle", cs $ blockHashToHex tickle - , "is already connected" - ] - updatePeerHeight pid ph height - _ -> do - $(logDebug) $ formatPid pid ph $ unwords - [ "The tickle", cs $ blockHashToHex tickle - , "is unknown. Requesting a peer header sync." - ] - peerHeaderSyncFull pid ph `catchAny` const (disconnectPeer pid ph) - newHeightM <- - fmap nodeBlockHeight <$> runSqlNodeT (getBlockByHash tickle) - case newHeightM of - Just height -> do - $(logInfo) $ formatPid pid ph $ unwords - [ "The block tickle", cs $ blockHashToHex tickle - , "was connected successfully" - ] - updatePeerHeight pid ph height - _ -> $(logWarn) $ formatPid pid ph $ unwords - [ "Could not find the height of block tickle" - , cs $ blockHashToHex tickle - ] - where - updatePeerHeight pid ph height = do - $(logInfo) $ formatPid pid ph $ unwords - [ "Updating peer height to", show height ] - atomicallyNodeT $ do - modifyPeerSession pid $ \s -> - s{ peerSessionHeight = height } - updateNetworkHeight - -waitTickle :: NodeT STM (PeerId, PeerHost, BlockHash) -waitTickle = do - tickleChan <- asks sharedTickleChan - resM <- lift $ readTBMChan tickleChan - case resM of - Just res -> return res - _ -> throw $ NodeException "tickle channel closed unexpectedly" - -syncedHeight :: MonadIO m => NodeT m (Bool, Word32) -syncedHeight = atomicallyNodeT $ do - synced <- areHeadersSynced - ourHeight <- nodeBlockHeight <$> readTVarS sharedBestHeader - return (synced, ourHeight) - -headerSync :: (MonadLoggerIO m, MonadBaseControl IO m) - => NodeT m () -headerSync = do - -- Start the header sync - $(logDebug) "Syncing more headers. Finding the best peer..." 
- (pid, PeerSession{..}) <- atomicallyNodeT $ do - peers <- getPeersAtNetHeight - case listToMaybe peers of - Just res@(pid,_) -> do - -- Save the header syncing peer - writeTVarS sharedHeaderPeer $ Just pid - return res - _ -> lift retry - - $(logDebug) $ formatPid pid peerSessionHost $ unwords - [ "Found best header syncing peer with score" - , show peerSessionScore - ] - - -- Run a maximum of 10 header downloads with this peer. - -- Then we re-evaluate the best peer - continue <- catchAny (peerHeaderSyncLimit pid peerSessionHost 10) $ - \e -> do - $(logError) $ pack $ unwords ["peerHeaderSyncLimit:", show e] - disconnectPeer pid peerSessionHost >> return True - - -- Reset the header syncing peer - atomicallyNodeT $ writeTVarS sharedHeaderPeer Nothing - - -- Check if we should continue the header sync - if continue then headerSync else do - (synced, ourHeight) <- syncedHeight - if synced - then - $(logInfo) $ formatPid pid peerSessionHost $ unwords - [ "Block headers are in sync with the" - , "network at height", show ourHeight - ] - -- Continue the download if we are not yet synced - else headerSync - -peerHeaderSyncLimit :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerId - -> PeerHost - -> Int - -> NodeT m Bool -peerHeaderSyncLimit pid ph initLimit - | initLimit < 1 = error "Limit must be at least 1" - | otherwise = go initLimit Nothing - where - go limit prevM = peerHeaderSync pid ph prevM >>= \actionM -> case actionM of - Just action -> - -- If we received a side chain or a known chain, we want to - -- continue szncing from this peer even if the limit has been - -- reached. - if limit > 1 || isSideChain action || isKnownChain action - then go (limit - 1) actionM - -- We got a Just, so we can continue the download from - -- this peer - else return True - _ -> return False - --- Sync all the headers from a given peer -peerHeaderSyncFull :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerId - -> PeerHost - -> NodeT m () -peerHeaderSyncFull pid ph = - go Nothing - where - go prevM = peerHeaderSync pid ph prevM >>= \actionM -> case actionM of - Just _ -> go actionM - Nothing -> do - (synced, ourHeight) <- syncedHeight - when synced $ $(logInfo) $ formatPid pid ph $ unwords - [ "Block headers are in sync with the" - , "network at height", show ourHeight - ] - -areBlocksSynced :: BlockHash -> NodeT STM Bool -areBlocksSynced walletHash = do - headersSynced <- areHeadersSynced - bestHeader <- readTVarS sharedBestHeader - return $ headersSynced && walletHash == nodeHash bestHeader - --- Check if the block headers are synced with the network height -areHeadersSynced :: NodeT STM Bool -areHeadersSynced = do - ourHeight <- nodeBlockHeight <$> readTVarS sharedBestHeader - netHeight <- readTVarS sharedNetworkHeight - -- If netHeight == 0 then we did not connect to any peers yet - return $ ourHeight >= netHeight && netHeight > 0 - --- | Sync one batch of headers from the given peer. Accept the result of a --- previous peerHeaderSync to correctly compute block locators in the --- presence of side chains. 
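-- A minimal sketch, not taken from this module: the shape of the bounded
-- retry loop that peerHeaderSyncLimit implements, written against a generic
-- monadic step that returns Nothing once the peer has no more headers. The
-- real function additionally keeps going past the limit for side chains and
-- known chains.
retryAtMost :: Monad m => Int -> m (Maybe a) -> m Bool
retryAtMost limit step
    | limit < 1 = error "Limit must be at least 1"
    | otherwise = go limit
  where
    go n = step >>= \resM -> case resM of
        Just _ | n > 1     -> go (n - 1)
               | otherwise -> return True   -- more to download from this peer
        Nothing            -> return False  -- peer is done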
-peerHeaderSync :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerId - -> PeerHost - -> Maybe BlockChainAction - -> NodeT m (Maybe BlockChainAction) -peerHeaderSync pid ph prevM = do - $(logDebug) $ formatPid pid ph "Waiting for the HeaderSync lock" - -- Aquire the header syncing lock - lock <- asks sharedSyncLock - liftBaseOp_ (Lock.with lock) $ do - - best <- atomicallyNodeT $ readTVarS sharedBestHeader - - -- Retrieve the block locator - loc <- case prevM of - Just (KnownChain ns) -> do - $(logDebug) $ formatPid pid ph "Building a known chain locator" - runSqlNodeT $ bLocator $ last ns - Just (SideChain ns) -> do - $(logDebug) $ formatPid pid ph "Building a side chain locator" - runSqlNodeT $ bLocator $ last ns - Just (BestChain ns) -> do - $(logDebug) $ formatPid pid ph "Building a best chain locator" - runSqlNodeT $ bLocator $ last ns - Just (ChainReorg _ _ ns) -> do - $(logDebug) $ formatPid pid ph "Building a reorg locator" - runSqlNodeT $ bLocator $ last ns - Nothing -> do - $(logDebug) $ formatPid pid ph "Building a locator to best" - runSqlNodeT $ bLocator best - - $(logDebug) $ formatPid pid ph $ unwords - [ "Requesting headers with block locator of size" - , show $ length loc - , "Start block:", cs $ blockHashToHex $ head loc - , "End block:", cs $ blockHashToHex $ last loc - ] - - -- Send a GetHeaders message to the peer - atomicallyNodeT $ sendMessage pid $ MGetHeaders $ GetHeaders 0x01 loc z - - $(logDebug) $ formatPid pid ph "Waiting 2 minutes for headers..." - - -- Wait 120 seconds for a response or time out - continueE <- raceTimeout 120 (disconnectPeer pid ph) (waitHeaders best) - - -- Return True if we can continue syncing from this peer - return $ either (const Nothing) id continueE - where - z = "0000000000000000000000000000000000000000000000000000000000000000" - -- Wait for the headers to be available - waitHeaders best = do - (rPid, headers) <- atomicallyNodeT $ takeTMVarS sharedHeaders - if rPid == pid - then processHeaders best headers - else waitHeaders best - processHeaders _ (Headers []) = do - $(logDebug) $ formatPid pid ph - "Received empty headers. Finished downloading headers." - -- Do not continue the header download - return Nothing - processHeaders best (Headers hs) = do - $(logDebug) $ formatPid pid ph $ unwords - [ "Received", show $ length hs, "headers." 
- , "Start blocks:", cs $ blockHashToHex $ headerHash $ fst $ head hs - , "End blocks:", cs $ blockHashToHex $ headerHash $ fst $ last hs - ] - now <- round <$> liftIO getPOSIXTime - actionE <- runSqlNodeT $ connectHeaders best (map fst hs) now - case actionE of - Left err -> do - misbehaving pid ph severeDoS err - return Nothing - Right action -> case actionNodes action of - [] -> do - $(logWarn) $ formatPid pid ph $ unwords - [ "Received an empty blockchain action:", show action ] - return Nothing - nodes -> do - $(logDebug) $ formatPid pid ph $ unwords - [ "Received", show $ length nodes - , "nodes in the action" - ] - let height = nodeBlockHeight $ last nodes - case action of - KnownChain _ -> - $(logInfo) $ formatPid pid ph $ unwords - [ "KnownChain headers received" - , "up to height", show height - ] - SideChain _ -> - $(logInfo) $ formatPid pid ph $ unwords - [ "SideChain headers connected successfully" - , "up to height", show height - ] - -- Headers extend our current best head - _ -> do - $(logInfo) $ formatPid pid ph $ unwords - [ "Best headers connected successfully" - , "up to height", show height - ] - atomicallyNodeT $ - writeTVarS sharedBestHeader $ last nodes - -- If we received less than 2000 headers, we are done - -- syncing from this peer and we return Nothing. - return $ if length hs < 2000 - then Nothing - else Just action - -nodeStatus :: NodeT STM NodeStatus -nodeStatus = do - nodeStatusPeers <- mapM peerStatus =<< getPeers - SharedNodeState{..} <- ask - lift $ do - best <- readTVar sharedBestBlock - header <- readTVar sharedBestHeader - let nodeStatusBestBlock = nodeHash best - nodeStatusBestBlockHeight = nodeBlockHeight best - nodeStatusBestHeader = nodeHash header - nodeStatusBestHeaderHeight = nodeBlockHeight header - nodeStatusNetworkHeight <- - readTVar sharedNetworkHeight - nodeStatusBloomSize <- - maybe 0 (S.length . bloomData . fst) <$> readTVar sharedBloomFilter - nodeStatusHeaderPeer <- - fmap hashUnique <$> readTVar sharedHeaderPeer - nodeStatusMerklePeer <- - fmap hashUnique <$> readTVar sharedMerklePeer - nodeStatusHaveHeaders <- - not <$> isEmptyTMVar sharedHeaders - nodeStatusHaveTickles <- - not <$> isEmptyTBMChan sharedTickleChan - nodeStatusHaveTxs <- - not <$> isEmptyTBMChan sharedTxChan - nodeStatusGetData <- - M.keys <$> readTVar sharedTxGetData - nodeStatusRescan <- - tryReadTMVar sharedRescan - nodeStatusMempool <- - readTVar sharedMempool - nodeStatusSyncLock <- - locked sharedSyncLock - return NodeStatus{..} - -peerStatus :: (PeerId, PeerSession) -> NodeT STM PeerStatus -peerStatus (pid, PeerSession{..}) = do - hostM <- getHostSession peerSessionHost - let peerStatusPeerId = hashUnique pid - peerStatusHost = peerSessionHost - peerStatusConnected = peerSessionConnected - peerStatusHeight = peerSessionHeight - peerStatusProtocol = version <$> peerSessionVersion - peerStatusUserAgent = - C.unpack . getVarString . 
userAgent <$> peerSessionVersion - peerStatusPing = show <$> peerSessionScore - peerStatusDoSScore = peerHostSessionScore <$> hostM - peerStatusLog = peerHostSessionLog <$> hostM - peerStatusReconnectTimer = peerHostSessionReconnect <$> hostM - lift $ do - peerStatusHaveMerkles <- not <$> isEmptyTBMChan peerSessionMerkleChan - peerStatusHaveMessage <- not <$> isEmptyTBMChan peerSessionChan - peerStatusPingNonces <- readTVar peerSessionPings - return PeerStatus{..} - diff --git a/haskoin-node/src/Network/Haskoin/Node/Checkpoints.hs b/haskoin-node/src/Network/Haskoin/Node/Checkpoints.hs deleted file mode 100644 index def7fdcd..00000000 --- a/haskoin-node/src/Network/Haskoin/Node/Checkpoints.hs +++ /dev/null @@ -1,30 +0,0 @@ -module Network.Haskoin.Node.Checkpoints -( checkpointMap -, checkpointList -, verifyCheckpoint -) where - -import Data.Map.Strict (Map) -import qualified Data.Map.Strict as M (fromList, lookup) - -import Data.Word (Word32) -import Network.Haskoin.Block -import Network.Haskoin.Constants - --- | Checkpoints from bitcoind reference implementation /src/checkpoints.cpp --- presented as an IntMap. -checkpointMap :: Map Word32 BlockHash -checkpointMap = M.fromList checkpointList - --- | Checkpoints from bitcoind reference implementation /src/checkpoints.cpp --- presented as a list. -checkpointList :: [(Word32, BlockHash)] -checkpointList = checkpoints - --- | Verify that a block hash at a given height either matches an existing --- checkpoint or is not a checkpoint. -verifyCheckpoint :: Word32 -> BlockHash -> Bool -verifyCheckpoint height hash = case M.lookup height checkpointMap of - Just value -> hash == value - Nothing -> True - diff --git a/haskoin-node/src/Network/Haskoin/Node/HeaderTree.hs b/haskoin-node/src/Network/Haskoin/Node/HeaderTree.hs deleted file mode 100644 index 330cd16b..00000000 --- a/haskoin-node/src/Network/Haskoin/Node/HeaderTree.hs +++ /dev/null @@ -1,640 +0,0 @@ -{-# LANGUAGE KindSignatures #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE RankNTypes #-} -module Network.Haskoin.Node.HeaderTree -( BlockChainAction(..) 
-, BlockHeight -, NodeBlock -, Timestamp -, initHeaderTree -, migrateHeaderTree -, getBestBlock -, getHeads -, getBlockByHash -, getParentBlock -, getBlockWindow -, getBlockAfterTime -, getChildBlocks -, getBlockByHeight -, getBlocksByHeight -, getBlocksFromHeight -, getBlocksAtHeight -, putBlock -, putBlocks -, nGenesisBlock -, splitBlock -, splitChains -, nodeBlock -, nodeBlockHeight -, nodeHash -, nHeader -, nodePrev -, nodeTimestamp -, nWork -, nHeight -, nodeChain -, isBestChain -, isChainReorg -, isSideChain -, isKnownChain -, connectHeader -, connectHeaders -, bLocator -, pruneChain -) where - -import Control.Monad (foldM, forM, unless, - when, (<=<)) -import Control.Monad.Except (ExceptT (..), - runExceptT, throwError) -import Control.Monad.State (evalStateT, get, put) -import Control.Monad.Trans (MonadIO, lift) -import Data.Bits (shiftL) -import qualified Data.ByteString as BS (take) -import Data.Either (fromRight) -import Data.Function (on) -import Data.List (find, maximumBy, sort) -import Data.Maybe (fromMaybe, isNothing, - listToMaybe, mapMaybe) -import Data.Serialize (decode, encode) -import Data.String.Conversions (cs) -import Data.Word (Word32) -import Database.Esqueleto (Esqueleto, Value, asc, - delete, from, groupBy, - in_, insertMany_, limit, - max_, not_, orderBy, - select, set, unValue, - update, val, valList, - where_, (!=.), (&&.), - (<=.), (=.), (==.), - (>.), (>=.), (^.), - (||.)) -import Database.Persist (Entity (..), insert_) -import Database.Persist.Sql (SqlPersistT) -import Network.Haskoin.Block -import Network.Haskoin.Constants -import Network.Haskoin.Node.Checkpoints -import Network.Haskoin.Node.HeaderTree.Model -import Network.Haskoin.Node.HeaderTree.Types - -data BlockChainAction - = BestChain { actionNodes :: ![NodeBlock] } - | ChainReorg { actionSplitNode :: !NodeBlock - , actionOldNodes :: ![NodeBlock] - , actionNodes :: ![NodeBlock] - } - | SideChain { actionNodes :: ![NodeBlock] } - | KnownChain { actionNodes :: ![NodeBlock] } - deriving (Show, Eq) - -shortHash :: BlockHash -> ShortHash -shortHash = - fromRight (error "Could not decdoe block hash") . - decode . BS.take 8 . encode . getBlockHash - -nHeader :: NodeBlock -> BlockHeader -nHeader = getNodeHeader . nodeBlockHeader - -nodeHash :: NodeBlock -> BlockHash -nodeHash = headerHash . nHeader - -nodePrev :: NodeBlock -> BlockHash -nodePrev = prevBlock . nHeader - -nodeTimestamp :: NodeBlock -> Timestamp -nodeTimestamp = blockTimestamp . nHeader - -nWork :: NodeBlock -> Work -nWork = nodeBlockWork - -nHeight :: NodeBlock -> BlockHeight -nHeight = nodeBlockHeight - -nodeChain :: NodeBlock -> Word32 -nodeChain = nodeBlockChain - --- | Number of blocks on average between difficulty cycles (2016 blocks). -diffInterval :: Word32 -diffInterval = targetTimespan `div` targetSpacing - --- | Genesis block. -nGenesisBlock :: NodeBlock -nGenesisBlock = NodeBlock - { nodeBlockHash = shortHash $ headerHash genesisHeader - , nodeBlockHeader = NodeHeader genesisHeader - , nodeBlockWork = 1.0 - , nodeBlockHeight = 0 - , nodeBlockChain = 0 - } - --- | Initialize the block header chain by inserting the genesis block if it --- doesn't already exist. 
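-- A minimal usage sketch, assuming the definitions above are in scope;
-- 'describeAction' is a hypothetical helper, not part of this module. It
-- shows how a caller can branch on the BlockChainAction returned by
-- connectHeader, connectHeaders or getBlockWindow.
describeAction :: BlockChainAction -> String
describeAction action = case action of
    BestChain ns         -> "best chain extended by " ++ show (length ns) ++ " block(s)"
    ChainReorg s old new -> "reorg at height " ++ show (nodeBlockHeight s)
                            ++ ": " ++ show (length old) ++ " off, "
                            ++ show (length new) ++ " on"
    SideChain ns         -> "side chain of " ++ show (length ns) ++ " block(s)"
    KnownChain ns        -> "already known (" ++ show (length ns) ++ " block(s))"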
-initHeaderTree :: MonadIO m => SqlPersistT m () -initHeaderTree = do - nodeM <- getBlockByHash $ headerHash genesisHeader - when (isNothing nodeM) $ putBlock nGenesisBlock - -getVerifyParams - :: MonadIO m - => BlockHeader - -> ExceptT String (SqlPersistT m) - (NodeBlock, [Timestamp], Timestamp, Word32, Maybe Word32) -getVerifyParams bh = do - parentM <- lift $ getBlockByHash $ prevBlock bh - parent <- maybe (throwError "Could not get parent node") return parentM - checkPointM <- fmap nodeBlockHeight <$> lift lastSeenCheckpoint - diffBlockM <- lift $ getBlockByHeight parent $ - nodeBlockHeight parent `div` diffInterval * diffInterval - diffTime <- maybe (throwError "Could not get difficulty change block") - (return . nodeTimestamp) - diffBlockM - medianBlocks <- lift $ map nodeTimestamp <$> - getBlocksFromHeight parent 11 (min 0 $ nodeBlockHeight parent - 10) - minWork <- lift $ findMinWork parent - return (parent, medianBlocks, diffTime, minWork, checkPointM) - -findMinWork :: MonadIO m => NodeBlock -> SqlPersistT m MinWork -findMinWork bn - | isMinWork bn = return $ blockBits $ nHeader bn - | otherwise = getParentBlock bn >>= - maybe (return $ blockBits $ nHeader bn) findMinWork - -isMinWork :: NodeBlock -> Bool -isMinWork bn - | not allowMinDifficultyBlocks = True - | nodeBlockHeight bn `mod` diffInterval == 0 = True - | blockBits (nHeader bn) /= encodeCompact powLimit = True - | otherwise = False - -splitKnown :: MonadIO m - => [BlockHeader] - -> SqlPersistT m ([NodeBlock], [BlockHeader]) -splitKnown hs = do - (kno, unk) <- foldM f ([], []) hs - return (reverse kno, reverse unk) - where - f (kno, []) n = do - bnM <- getBlockByHash (headerHash n) - case bnM of - Nothing -> return (kno, [n]) - Just bn -> return (bn:kno, []) - f (kno, unk) n = return (kno, n:unk) - --- | Connect a block header to this block header chain. Corresponds to bitcoind --- function ProcessBlockHeader and AcceptBlockHeader in main.cpp. -connectHeader :: MonadIO m - => NodeBlock - -> BlockHeader - -> Timestamp - -> SqlPersistT m (Either String BlockChainAction) -connectHeader best bh ts = runExceptT $ do - (kno, _) <- lift $ splitKnown [bh] - case kno of - [] -> do - (parent, medians, diffTime, minWork, cpM) <- getVerifyParams bh - chain <- lift $ getChain parent - let bn = nodeBlock parent chain bh - ExceptT . return $ - verifyBlockHeader parent medians diffTime cpM minWork ts bh - lift $ putBlock bn - lift $ evalNewChain best [bn] - _ -> return $ KnownChain kno - --- | A more efficient way of connecting a list of block headers than connecting --- them individually. The list of block headers have must form a valid chain. -connectHeaders :: MonadIO m - => NodeBlock - -> [BlockHeader] - -> Timestamp - -> SqlPersistT m (Either String BlockChainAction) -connectHeaders _ [] _ = runExceptT $ throwError "Nothing to connect" -connectHeaders best bhs ts = runExceptT $ do - unless (validChain bhs) $ throwError "Block headers do not form a valid chain" - (kno, unk) <- lift $ splitKnown bhs - case unk of - [] -> return $ KnownChain kno - (bh:_) -> do - (parent, medians, diffTime, minWork, cpM) <- getVerifyParams bh - chain <- lift $ getChain parent - nodes <- (`evalStateT` (parent, diffTime, medians, minWork)) $ - forM unk $ \b -> do - (p, d, ms, mw) <- get - lift . ExceptT . 
return $ verifyBlockHeader p ms d cpM mw ts b - let bn = nodeBlock p chain b - d' = if nodeBlockHeight bn `mod` diffInterval == 0 - then blockTimestamp b - else d - ms' = blockTimestamp b : if length ms == 11 - then tail ms - else ms - mw' = if isMinWork bn then blockBits b else mw - put (bn, d', ms', mw') - return bn - lift $ putBlocks nodes - lift $ evalNewChain best nodes - where - validChain (a:b:xs) = prevBlock b == headerHash a && validChain (b:xs) - validChain [_] = True - validChain _ = False - --- | Returns True if the action is a best chain. -isBestChain :: BlockChainAction -> Bool -isBestChain (BestChain _) = True -isBestChain _ = False - --- | Returns True if the action is a chain reorg. -isChainReorg :: BlockChainAction -> Bool -isChainReorg ChainReorg{} = True -isChainReorg _ = False - --- | Returns True if the action is a side chain. -isSideChain :: BlockChainAction -> Bool -isSideChain (SideChain _) = True -isSideChain _ = False - --- | Returns True if the action is a known chain. -isKnownChain :: BlockChainAction -> Bool -isKnownChain (KnownChain _) = True -isKnownChain _ = False - --- | Returns a BlockLocator object for a given block hash. -bLocator :: MonadIO m => NodeBlock -> SqlPersistT m BlockLocator -bLocator node = do - nodes <- getBlocksByHeight node bs - return $ map nodeHash nodes - where - h = nodeBlockHeight node - f x s = (fst x - s, fst x > s) - bs = (++ [0]) $ map fst $ takeWhile snd $ - [(h - x, x < h) | x <- [0..9]] ++ - scanl f (h - 10, h > 10) [2 ^ (x :: Word32) | x <- [1..]] - --- | Verify block header conforms to protocol. -verifyBlockHeader :: NodeBlock -- ^ Parent block header - -> [Timestamp] -- ^ Timestamps of previous 11 blocks - -> Timestamp -- ^ Previous difficulty change - -> Maybe Word32 -- ^ Height of most recent checkpoint - -> MinWork -- ^ Last MinWork (e.g. Testnet3) - -> Timestamp -- ^ Current time - -> BlockHeader -- ^ Block header to validate - -> Either String () --- TODO: Add DOS return values -verifyBlockHeader par mts dt cp mw ts bh = do - unless (isValidPOW bh) $ - Left "Invalid proof of work" - - unless (blockTimestamp bh <= ts + 2 * 60 * 60) $ - Left "Invalid header timestamp" - - let nextWork = nextWorkRequired par dt mw bh - unless (blockBits bh == nextWork) $ - Left "Incorrect work transition (bits)" - - let sortedMedians = sort mts - medianTime = sortedMedians !! (length sortedMedians `div` 2) - when (blockTimestamp bh <= medianTime) $ - Left "Block timestamp is too early" - - let newHeight = nodeBlockHeight par + 1 - unless (maybe True (fromIntegral newHeight >) cp) $ - Left "Rewriting pre-checkpoint chain" - - unless (verifyCheckpoint (fromIntegral newHeight) (headerHash bh)) $ - Left "Rejected by checkpoint lock-in" - - -- All block of height 227836 or more use version 2 in prodnet - -- TODO: Find out the value here for testnet - when (networkName == "prodnet" - && blockVersion bh == 1 - && nodeBlockHeight par + 1 >= 227836) $ - Left "Rejected version 1 block" - --- | Create a block node data structure from a block header. 
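-- A minimal sketch, not part of this module: the height schedule that
-- bLocator samples for a tip at height h -- the ten most recent heights,
-- then exponentially wider steps back, always ending at the genesis height.
-- BlockHeight is the Word32 synonym from haskoin-core.
locatorHeights :: BlockHeight -> [BlockHeight]
locatorHeights h = (++ [0]) $ map fst $ takeWhile snd $
    [(h - x, x < h) | x <- [0 .. 9]] ++
    scanl f (h - 10, h > 10) [2 ^ (x :: Word32) | x <- [1 ..]]
  where
    f x s = (fst x - s, fst x > s)
-- For example: locatorHeights 100
--   == [100,99,98,97,96,95,94,93,92,91,90,88,84,76,60,28,0]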
-nodeBlock :: NodeBlock -- ^ Parent block node - -> Word32 -- ^ Chain number for new node - -> BlockHeader - -> NodeBlock -nodeBlock parent chain bh = NodeBlock - { nodeBlockHash = shortHash $ headerHash bh - , nodeBlockHeader = NodeHeader bh - , nodeBlockWork = newWork - , nodeBlockHeight = height - , nodeBlockChain = chain - } - where - newWork = nodeBlockWork parent + fromIntegral - (headerWork bh `div` headerWork genesisHeader) - height = nodeBlockHeight parent + 1 - --- | Return blockchain action to connect given block with best block. Count will --- limit the amount of blocks building up from split point towards the best --- block. -getBlockWindow :: MonadIO m - => NodeBlock -- ^ Best block - -> NodeBlock -- ^ Start of window - -> Word32 -- ^ Window count - -> SqlPersistT m BlockChainAction -getBlockWindow best node cnt = do - (_, old, new) <- splitChains (node, 0) (best, cnt) - return $ if null old then BestChain new else ChainReorg node old new - --- | Find the split point between two nodes. It also returns the two partial --- chains leading from the split point to the respective nodes. Tuples must --- contain a block node and the count of nodes that should be returned from the --- split towards that block. 0 means all. -splitChains :: MonadIO m - => (NodeBlock, Word32) - -> (NodeBlock, Word32) - -> SqlPersistT m (NodeBlock, [NodeBlock], [NodeBlock]) -splitChains (l, ln) (r, rn) = do - sn <- splitBlock l r - (split:ls) <- getBlocksFromHeight l ln (nodeBlockHeight sn) - rs <- getBlocksFromHeight r rn (nodeBlockHeight sn + 1) - return (split, ls, rs) - --- | Finds the parent of a block. -getParentBlock :: MonadIO m - => NodeBlock - -> SqlPersistT m (Maybe NodeBlock) -getParentBlock node - | nodeBlockHeight node == 0 = return Nothing - | otherwise = getBlockByHash p - where - p = nodePrev node - --- | Get all children for a block -getChildBlocks :: MonadIO m - => BlockHash - -> SqlPersistT m [NodeBlock] -getChildBlocks h = do - ch <- (+1) . nodeBlockHeight . fromMaybe e <$> getBlockByHash h - filter ((==h) . nodePrev) <$> getBlocksAtHeight ch - where - e = error $ "Cannot find block hash " ++ cs (blockHashToHex h) - - --- | Get the last checkpoint that we have seen. -lastSeenCheckpoint :: MonadIO m - => SqlPersistT m (Maybe NodeBlock) -lastSeenCheckpoint = - fmap listToMaybe $ getBlocksByHash $ map snd $ reverse checkpointList - --- | Returns the work required for a block header given the previous block. This --- coresponds to bitcoind function GetNextWorkRequired in main.cpp. -nextWorkRequired :: NodeBlock - -> Timestamp - -> MinWork - -> BlockHeader - -> Word32 -nextWorkRequired par ts mw bh - -- Genesis block - | nodeBlockHeight par == 0 = encodeCompact powLimit - -- Only change the difficulty once per interval - | (nodeBlockHeight par + 1) `mod` diffInterval /= 0 = - if allowMinDifficultyBlocks - then minPOW - else blockBits $ nHeader par - | otherwise = workFromInterval ts (nHeader par) - where - delta = targetSpacing * 2 - minPOW - | blockTimestamp bh > nodeTimestamp par + delta = encodeCompact powLimit - | otherwise = mw - --- | Computes the work required for the next block given a timestamp and the --- current block. The timestamp should come from the block that matched the --- last jump in difficulty (spaced out by 2016 blocks in prodnet). 
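-- A worked example (illustrative, using the usual prodnet constants
-- targetTimespan = 1209600 s and a 2016-block interval): if the last 2016
-- blocks arrived in only one week (604800 s), then
--
--   actualTime = max (targetTimespan `div` 4) (min (targetTimespan * 4) 604800)
--              = 604800
--   newDiff    = lastDiff * 604800 `div` 1209600 = lastDiff `div` 2
--
-- so the decoded target halves, the effective difficulty doubles, and the
-- result is re-encoded with encodeCompact.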
-workFromInterval :: Timestamp -> BlockHeader -> Word32 -workFromInterval ts lastB - | newDiff > powLimit = encodeCompact powLimit - | otherwise = encodeCompact newDiff - where - t = fromIntegral $ blockTimestamp lastB - ts - actualTime - | t < targetTimespan `div` 4 = targetTimespan `div` 4 - | t > targetTimespan * 4 = targetTimespan * 4 - | otherwise = t - lastDiff = fst $ decodeCompact $ blockBits lastB - newDiff = lastDiff * toInteger actualTime `div` toInteger targetTimespan - --- | Returns the work represented by this block. Work is defined as the number --- of tries needed to solve a block in the average case with respect to the --- target. -headerWork :: BlockHeader -> Integer -headerWork bh = - fromIntegral $ largestHash `div` (target + 1) - where - target = fst $ decodeCompact (blockBits bh) - largestHash = 1 `shiftL` 256 - -{- Persistent backend -} - -chainPathQuery :: forall (expr :: * -> *) (query :: * -> *) backend. - Esqueleto query expr backend - => expr (Entity NodeBlock) - -> [NodeBlock] - -> expr (Value Bool) -chainPathQuery _ [] = error "Monsters, monsters everywhere" - -chainPathQuery t [NodeBlock{..}] = - t ^. NodeBlockHeight <=. val nodeBlockHeight &&. - t ^. NodeBlockChain ==. val nodeBlockChain - -chainPathQuery t (n1:bs@(n2:_)) = chainPathQuery t bs ||. - ( t ^. NodeBlockHeight <=. val (nodeBlockHeight n1) - &&. t ^. NodeBlockHeight >. val (nodeBlockHeight n2) - &&. t ^. NodeBlockChain ==. val (nodeBlockChain n1) - ) - -getHeads :: MonadIO m => SqlPersistT m [NodeBlock] -getHeads = fmap (map (entityVal . snd)) $ select $ from $ \t -> do - groupBy $ t ^. NodeBlockChain - return (max_ (t ^. NodeBlockHeight), t) - --- | Chain for new block building on a parent node -getChain :: MonadIO m - => NodeBlock -- ^ Parent node - -> SqlPersistT m Word32 -getChain parent = do - maxHeightM <- fmap (unValue <=< listToMaybe) $ select $ from $ \t -> do - where_ $ t ^. NodeBlockChain ==. val (nodeBlockChain parent) - return $ max_ $ t ^. NodeBlockHeight - let maxHeight = fromMaybe (error "That chain does not exist") maxHeightM - if maxHeight == nodeBlockHeight parent - then return $ nodeBlockChain parent - else do - maxChainM <- fmap (unValue <=< listToMaybe) $ select $ from $ \t -> - return $ max_ $ t ^. NodeBlockChain - let maxChain = fromMaybe (error "Ran out of chains") maxChainM - return $ maxChain + 1 - -getPivots :: MonadIO m => NodeBlock -> SqlPersistT m [NodeBlock] -getPivots = go [] - where - go acc b - | nodeBlockChain b == 0 = return $ nGenesisBlock : b : acc - | otherwise = do - l <- fromMaybe (error "Houston, we have a problem") <$> - getChainLowest b - c <- fromMaybe (error "Ground Control to Major Tom") <$> - getParentBlock l - go (b:acc) c - -getChainLowest :: MonadIO m => NodeBlock -> SqlPersistT m (Maybe NodeBlock) -getChainLowest nb = fmap (listToMaybe . map entityVal) $ - select $ from $ \t -> do - where_ $ t ^. NodeBlockChain ==. val (nodeBlockChain nb) - orderBy [ asc $ t ^. NodeBlockHeight ] - limit 1 - return t - --- | Get node height and chain common to both given. 
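-- An illustrative aside, not part of this module: for the prodnet genesis
-- header, blockBits is 0x1d00ffff, so the decoded target is
-- 0xffff * 2 ^ 208 and
--
--   headerWork genesisHeader = (1 `shiftL` 256) `div` (0xffff * 2 ^ 208 + 1)
--                            ~ 2 ^ 32
--
-- i.e. roughly 4.3 billion expected hash attempts at minimum difficulty.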
-splitBlock :: MonadIO m - => NodeBlock - -> NodeBlock - -> SqlPersistT m NodeBlock -splitBlock l r = if nodeBlockChain l == nodeBlockChain r - then if nodeBlockHeight l < nodeBlockHeight r - then return l - else return r - else do - pivotsL <- getPivots l - pivotsR <- getPivots r - let ns = zip pivotsL pivotsR - f (x,y) = nodeBlockChain x == nodeBlockChain y - (one, two) = last $ takeWhile f ns - if nodeBlockHeight one < nodeBlockHeight two - then return one - else return two - --- | Put single block in database. -putBlock :: MonadIO m => NodeBlock -> SqlPersistT m () -putBlock = insert_ - --- | Put multiple blocks in database. -putBlocks :: MonadIO m => [NodeBlock] -> SqlPersistT m () -putBlocks = mapM_ insertMany_ . f - where - f [] = [] - f xs = let (xs',xxs) = splitAt 50 xs in xs' : f xxs - -getBestBlock :: MonadIO m => SqlPersistT m NodeBlock -getBestBlock = - maximumBy (compare `on` nodeBlockWork) <$> getHeads - -getBlockByHash :: MonadIO m => BlockHash -> SqlPersistT m (Maybe NodeBlock) -getBlockByHash h = - fmap (listToMaybe . map entityVal) $ select $ from $ \t -> do - where_ $ t ^. NodeBlockHash ==. val (shortHash h) - return t - --- | Get multiple blocks corresponding to given hashes -getBlocksByHash :: MonadIO m - => [BlockHash] - -> SqlPersistT m [NodeBlock] -getBlocksByHash hashes = do - nodes <- fmap (map entityVal) $ select $ from $ \t -> do - where_ $ t ^. NodeBlockHash `in_` valList (map shortHash hashes) - return t - return $ mapMaybe - (\h -> find ((== shortHash h) . nodeBlockHash) nodes) - hashes - --- | Get ancestor of specified block at given height. -getBlockByHeight :: MonadIO m - => NodeBlock -- ^ Best block - -> BlockHeight - -> SqlPersistT m (Maybe NodeBlock) -getBlockByHeight block height = do - forks <- reverse <$> getPivots block - fmap (listToMaybe . map entityVal) $ select $ from $ \t -> do - where_ $ chainPathQuery t forks &&. - t ^. NodeBlockHeight ==. val height - return t - --- | Get ancestors for specified block at given heights. -getBlocksByHeight :: MonadIO m - => NodeBlock -- ^ Best block - -> [BlockHeight] - -> SqlPersistT m [NodeBlock] -getBlocksByHeight best heights = do - forks <- reverse <$> getPivots best - nodes <- fmap (map entityVal) $ select $ from $ \t -> do - where_ $ chainPathQuery t forks &&. - t ^. NodeBlockHeight `in_` valList heights - return t - return $ mapMaybe (\h -> find ((==h) . nodeBlockHeight) nodes) heights - --- | Get a range of block headers building up to specified block. If --- specified height is too large, an empty list will be returned. -getBlocksFromHeight :: MonadIO m - => NodeBlock -- ^ Best block - -> Word32 -- ^ Count (0 for all) - -> BlockHeight -- ^ Height from (including) - -> SqlPersistT m [NodeBlock] -getBlocksFromHeight block cnt height = do - forks <- reverse <$> getPivots block - fmap (map entityVal) $ select $ from $ \t -> do - where_ $ chainPathQuery t forks &&. - t ^. NodeBlockHeight >=. val height - when (cnt > 0) $ limit $ fromIntegral cnt - return t - --- | Get node immediately at or after timestamp in main chain. 
-getBlockAfterTime :: MonadIO m => Timestamp -> SqlPersistT m (Maybe NodeBlock) -getBlockAfterTime ts = do - n@NodeBlock{..} <- getBestBlock - f nGenesisBlock n - where - f l r | nodeTimestamp r < ts = - return Nothing - | nodeTimestamp l >= ts = - return $ Just l - | (nodeBlockHeight r - nodeBlockHeight l) `div` 2 == 0 = - return $ Just r - | otherwise = do - let rh = nodeBlockHeight r - lh = nodeBlockHeight l - mh = rh - (rh - lh) `div` 2 - m <- fromMaybe (error "My God, it’s full of stars!") <$> - getBlockByHeight r mh - if nodeTimestamp m > ts then f l m else f m r - --- | Get blocks at specified height in all chains. -getBlocksAtHeight :: MonadIO m => BlockHeight -> SqlPersistT m [NodeBlock] -getBlocksAtHeight height = fmap (map entityVal) $ select $ from $ \t -> do - where_ $ t ^. NodeBlockHeight ==. val height - return t - --- | Evaluate block action for provided best block and chain of new blocks. -evalNewChain :: MonadIO m - => NodeBlock - -> [NodeBlock] - -> SqlPersistT m BlockChainAction -evalNewChain _ [] = error "You find yourself in the dungeon of missing blocks" -evalNewChain best newNodes - | buildsOnBest = - return $ BestChain newNodes - | nodeBlockWork (last newNodes) > nodeBlockWork best = do - (split, old, new) <- splitChains (best, 0) (head newNodes, 0) - return $ ChainReorg split old (new ++ tail newNodes) - | otherwise = do - (split, _, new) <- splitChains (best, 0) (head newNodes, 0) - case new of - [] -> return $ KnownChain newNodes - _ -> return $ SideChain $ split : new ++ tail newNodes - where - buildsOnBest = nodePrev (head newNodes) == nodeHash best - --- | Remove all other chains from database and return updated best block node. -pruneChain :: MonadIO m - => NodeBlock - -> SqlPersistT m NodeBlock -pruneChain best = if nodeBlockChain best == 0 then return best else do - forks <- reverse <$> getPivots best - delete $ from $ \t -> where_ $ not_ (chainPathQuery t forks) - update $ \t -> do - set t [ NodeBlockChain =. val 0 ] - where_ $ t ^. NodeBlockHeight <=. val (nodeBlockHeight best) - &&. t ^. NodeBlockChain !=. 
val 0 - return best{ nodeBlockChain = 0 } diff --git a/haskoin-node/src/Network/Haskoin/Node/HeaderTree/Model.hs b/haskoin-node/src/Network/Haskoin/Node/HeaderTree/Model.hs deleted file mode 100644 index 98ab8ed5..00000000 --- a/haskoin-node/src/Network/Haskoin/Node/HeaderTree/Model.hs +++ /dev/null @@ -1,28 +0,0 @@ -{-# LANGUAGE EmptyDataDecls #-} -{-# LANGUAGE GADTs #-} -{-# LANGUAGE GeneralizedNewtypeDeriving #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE QuasiQuotes #-} -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE TypeFamilies #-} -module Network.Haskoin.Node.HeaderTree.Model where - -import Data.Word (Word32) -import Database.Persist.TH (mkMigrate, mkPersist, - persistLowerCase, share, - sqlSettings) -import Network.Haskoin.Block -import Network.Haskoin.Node.HeaderTree.Types - -share [mkPersist sqlSettings, mkMigrate "migrateHeaderTree"] [persistLowerCase| -NodeBlock - hash ShortHash - header NodeHeader maxlen=80 - work Work - height BlockHeight - chain Word32 - UniqueHash hash - UniqueChain chain height - deriving Show - deriving Eq -|] diff --git a/haskoin-node/src/Network/Haskoin/Node/HeaderTree/Types.hs b/haskoin-node/src/Network/Haskoin/Node/HeaderTree/Types.hs deleted file mode 100644 index 1cc5e405..00000000 --- a/haskoin-node/src/Network/Haskoin/Node/HeaderTree/Types.hs +++ /dev/null @@ -1,28 +0,0 @@ -module Network.Haskoin.Node.HeaderTree.Types where - -import Data.Serialize (decode, encode) -import Data.String (fromString) -import Data.Word (Word64) -import Database.Persist (PersistField (..), PersistValue (..), - SqlType (..)) -import Database.Persist.Sql (PersistFieldSql (..)) -import Network.Haskoin.Block - -type ShortHash = Word64 -type Work = Double - -newtype NodeHeader = NodeHeader { getNodeHeader :: BlockHeader } - deriving (Show, Eq) - -{- SQL database backend for HeaderTree -} - -instance PersistField NodeHeader where - toPersistValue = PersistByteString . encode . 
getNodeHeader - fromPersistValue (PersistByteString bs) = - case decode bs of - Right x -> Right (NodeHeader x) - Left e -> Left (fromString e) - fromPersistValue _ = Left "Invalid persistent block header" - -instance PersistFieldSql NodeHeader where - sqlType _ = SqlBlob diff --git a/haskoin-node/src/Network/Haskoin/Node/Peer.hs b/haskoin-node/src/Network/Haskoin/Node/Peer.hs deleted file mode 100644 index 92876bd5..00000000 --- a/haskoin-node/src/Network/Haskoin/Node/Peer.hs +++ /dev/null @@ -1,720 +0,0 @@ -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE TemplateHaskell #-} -module Network.Haskoin.Node.Peer where - -import Control.Concurrent (killThread, myThreadId, - threadDelay) -import Control.Concurrent.Async.Lifted (link, race, waitAnyCancel, - waitCatch, withAsync) -import Control.Concurrent.STM (STM, atomically, modifyTVar', - newTVarIO, readTVar, retry, - swapTVar) -import Control.Concurrent.STM.TBMChan (TBMChan, closeTBMChan, - newTBMChan, writeTBMChan) -import Control.Exception (AsyncException(ThreadKilled)) -import Control.Exception.Lifted (finally, fromException, throw, - throwIO) -import Control.Monad (forM_, forever, join, unless, - when) -import Control.Monad.Logger (MonadLoggerIO, logDebug, - logError, logInfo, logWarn) -import Control.Monad.Reader (asks) -import Control.Monad.State (StateT, evalStateT, get, put) -import Control.Monad.Trans (MonadIO, lift, liftIO) -import Control.Monad.Trans.Control (MonadBaseControl) -import Data.Bits (testBit) -import qualified Data.ByteString as BS (ByteString, append, - null) -import qualified Data.ByteString.Char8 as C (pack) -import qualified Data.ByteString.Lazy as BL (toStrict) -import Data.Conduit (Conduit, Sink, awaitForever, - yield, ($$), ($=)) -import qualified Data.Conduit.Binary as CB (take) -import Data.Conduit.Network (appSink, appSource, - clientSettings, - runGeneralTCPClient) -import Data.Conduit.TMChan (sourceTBMChan) -import Data.List (nub, sort, sortBy) -import qualified Data.Map as M (assocs, elems, fromList, - keys, lookup, unionWith) -import Data.Maybe (fromMaybe, isJust, - listToMaybe) -import Data.Serialize (decode, encode) -import Data.String.Conversions (cs) -import Data.Text (Text, pack) -import Data.Time.Clock (diffUTCTime, getCurrentTime) -import Data.Time.Clock.POSIX (getPOSIXTime) -import Data.Unique (hashUnique, newUnique) -import Data.Word (Word32) -import Network.Haskoin.Block -import Network.Haskoin.Constants -import Network.Haskoin.Network -import Network.Haskoin.Node.HeaderTree -import Network.Haskoin.Node.STM -import Network.Haskoin.Transaction -import Network.Haskoin.Util -import Network.Socket (SockAddr (SockAddrInet)) -import System.Random (randomIO) - --- TODO: Move constants elsewhere ? -minProtocolVersion :: Word32 -minProtocolVersion = 70001 - --- Start a reconnecting peer that will idle once the connection is established --- and the handshake is performed. -startPeer :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerHost - -> NodeT m () -startPeer ph@PeerHost{..} = do - -- Create a new unique ID for this peer - pid <- liftIO newUnique - -- Start the peer with the given PID - startPeerPid pid ph - --- Start a peer that will try to reconnect when the connection is closed. The --- reconnections are performed using an expoential backoff time. This function --- blocks until the peer cannot reconnect (either the peer is banned or we --- already have a peer connected to the given peer host). 
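
The stored delay starts at one second; each reconnection waits twice the stored value, capped at 900 seconds (15 minutes). A self-contained sketch of the resulting schedule, with a hypothetical name:

-- Successive reconnection delays, in seconds (illustrative only).
reconnectDelays :: [Int]
reconnectDelays = tail (iterate (min 900 . (* 2)) 1)

-- > take 10 reconnectDelays  ==  [2,4,8,16,32,64,128,256,512,900]
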
-startReconnectPeer :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerHost - -> NodeT m () -startReconnectPeer ph@PeerHost{..} = do - -- Create a new unique ID for this peer - pid <- liftIO newUnique - -- Wait if there is a reconnection timeout - maybeWaitReconnect pid - -- Launch the peer - withAsync (startPeerPid pid ph) $ \a -> do - resE <- liftIO $ waitCatch a - reconnect <- case resE of - Left se -> do - $(logError) $ formatPid pid ph $ unwords - [ "Peer thread stopped with exception:", show se ] - return $ case fromException se of - Just NodeExceptionBanned -> False - Just NodeExceptionConnected -> False - Just (NodeExceptionInvalidPeer _) -> False - _ -> fromException se /= Just ThreadKilled - Right _ -> do - $(logDebug) $ formatPid pid ph "Peer thread stopped" - return True - -- Try to reconnect - when reconnect $ startReconnectPeer ph - where - maybeWaitReconnect pid = do - reconnect <- atomicallyNodeT $ do - sessM <- getHostSession ph - case sessM of - Just PeerHostSession{..} -> do - -- Compute the new reconnection time (max 15 minutes) - let reconnect = min 900 $ 2 * peerHostSessionReconnect - -- Save the reconnection time - modifyHostSession ph $ \s -> - s{ peerHostSessionReconnect = reconnect } - return reconnect - _ -> return 0 - - when (reconnect > 0) $ do - $(logInfo) $ formatPid pid ph $ unwords - [ "Reconnecting peer in", show reconnect, "seconds" ] - -- Wait for some time before calling a reconnection - liftIO $ threadDelay $ reconnect * 1000000 - --- Start a peer with with the given peer host/peer id and initiate the --- network protocol handshake. This function will block until the peer --- connection is closed or an exception is raised. -startPeerPid :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerId - -> PeerHost - -> NodeT m () -startPeerPid pid ph@PeerHost{..} = do - -- Check if the peer host is banned - banned <- atomicallyNodeT $ isPeerHostBanned ph - when banned $ do - $(logWarn) $ formatPid pid ph "Failed to start banned host" - liftIO $ throwIO NodeExceptionBanned - - -- Check if the peer host is already connected - connected <- atomicallyNodeT $ isPeerHostConnected ph - when connected $ do - $(logWarn) $ formatPid pid ph "This host is already connected" - liftIO $ throwIO NodeExceptionConnected - - tid <- liftIO myThreadId - chan <- liftIO . atomically $ newTBMChan 1024 - mChan <- liftIO . atomically $ newTBMChan 1024 - pings <- liftIO $ newTVarIO [] - atomicallyNodeT $ do - newPeerSession pid PeerSession - { peerSessionConnected = False - , peerSessionVersion = Nothing - , peerSessionHeight = 0 - , peerSessionChan = chan - , peerSessionHost = ph - , peerSessionThreadId = tid - , peerSessionMerkleChan = mChan - , peerSessionPings = pings - , peerSessionScore = Nothing - } - newHostSession ph PeerHostSession - { peerHostSessionScore = 0 - , peerHostSessionReconnect = 1 - , peerHostSessionLog = [] - } - - $(logDebug) $ formatPid pid ph "Starting a new client TCP connection" - - -- Start the client TCP connection - let c = clientSettings peerPort $ C.pack peerHost - runGeneralTCPClient c (peerTCPClient chan) `finally` cleanupPeer - return () - where - peerTCPClient chan ad = do - -- Conduit for receiving messages from the remote host - let recvMsg = appSource ad $$ decodeMessage pid ph - -- Conduit for sending messages to the remote host - sendMsg = sourceTBMChan chan $= encodeMessage $$ appSink ad - - withAsync (evalStateT recvMsg Nothing) $ \a1 -> link a1 >> do - $(logDebug) $ formatPid pid ph - "Receiving message thread started..." 
- withAsync sendMsg $ \a2 -> link a2 >> do - $(logDebug) $ formatPid pid ph - "Sending message thread started..." - -- Perform the peer handshake before we continue - -- Timeout after 2 minutes - resE <- raceTimeout 120 (disconnectPeer pid ph) - (peerHandshake pid ph chan) - case resE of - Left _ -> $(logError) $ formatPid pid ph - "Peer timed out during the connection handshake" - _ -> do - -- Send the bloom filter if we have one - $(logDebug) $ formatPid pid ph - "Sending the bloom filter if we have one" - atomicallyNodeT $ do - bloomM <- readTVarS sharedBloomFilter - case bloomM of - Just (bloom, _) -> - sendMessage pid $ - MFilterLoad $ FilterLoad bloom - _ -> return () - withAsync (peerPing pid ph) $ \a3 -> link a3 >> do - $(logDebug) $ formatPid pid ph "Ping thread started" - _ <- liftIO $ waitAnyCancel [a1, a2, a3] - $(logDebug) $ formatPid pid ph "Exiting peer TCP thread" - return () - - cleanupPeer = do - $(logWarn) $ formatPid pid ph "Peer is closing. Running cleanup..." - atomicallyNodeT $ do - -- Remove the header syncing peer if necessary - hPidM <- readTVarS sharedHeaderPeer - when (hPidM == Just pid) $ writeTVarS sharedHeaderPeer Nothing - -- Remove the merkle syncing peer if necessary - mPidM <- readTVarS sharedMerklePeer - when (mPidM == Just pid) $ writeTVarS sharedMerklePeer Nothing - -- Remove the session and close the channels - sessM <- removePeerSession pid - case sessM of - Just PeerSession{..} -> lift $ do - closeTBMChan peerSessionChan - closeTBMChan peerSessionMerkleChan - _ -> return () - -- Update the network height - updateNetworkHeight - --- Return True if the PeerHost is banned -isPeerHostBanned :: PeerHost -> NodeT STM Bool -isPeerHostBanned ph = do - hostMap <- readTVarS sharedHostMap - case M.lookup ph hostMap of - Just sessTVar -> do - sess <- lift $ readTVar sessTVar - return $ isHostScoreBanned $ peerHostSessionScore sess - _ -> return False - --- Returns True if we have a peer connected to that PeerHost already -isPeerHostConnected :: PeerHost -> NodeT STM Bool -isPeerHostConnected ph = do - peerMap <- readTVarS sharedPeerMap - sess <- lift $ mapM readTVar $ M.elems peerMap - return $ ph `elem` map peerSessionHost sess - --- | Decode messages sent from the remote host and send them to the peers main --- message queue for processing. If we receive invalid messages, this function --- will also notify the PeerManager about a misbehaving remote host. -decodeMessage - :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerId - -> PeerHost - -> Sink BS.ByteString (StateT (Maybe (MerkleBlock, MerkleTxs)) (NodeT m)) () -decodeMessage pid ph = do - -- Message header is always 24 bytes - headerBytes <- BL.toStrict <$> CB.take 24 - -- If headerBytes is empty, the conduit has disconnected and we need to - -- exit (not recurse). Otherwise, we go into an infinite loop here. - unless (BS.null headerBytes) $ do - -- Introspection required to know the length of the payload - case decode headerBytes of - Left err -> lift . lift $ misbehaving pid ph moderateDoS $ unwords - [ "Could not decode message header:", err - , "Bytes:", cs (encodeHex headerBytes) - ] - Right (MessageHeader _ cmd len _) -> do - $(logDebug) $ formatPid pid ph $ unwords - [ "Received message header of type", show cmd ] - payloadBytes <- BL.toStrict <$> CB.take (fromIntegral len) - case decode $ headerBytes `BS.append` payloadBytes of - Left err -> lift . 
lift $ misbehaving pid ph moderateDoS $ - unwords [ "Could not decode message payload:", err ] - Right msg -> lift $ processMessage pid ph msg - decodeMessage pid ph - --- Handle a message from a peer -processMessage :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerId - -> PeerHost - -> Message - -> StateT (Maybe (MerkleBlock, MerkleTxs)) (NodeT m) () -processMessage pid ph msg = checkMerkleEnd >> case msg of - MVersion v -> lift $ do - $(logDebug) $ formatPid pid ph "Processing MVersion message" - join . atomicallyNodeT $ do - oldVerM <- peerSessionVersion <$> getPeerSession pid - case oldVerM of - Just _ -> do - _ <- trySendMessage pid $ MReject $ reject - MCVersion RejectDuplicate "Duplicate version message" - return $ - misbehaving pid ph minorDoS "Duplicate version message" - Nothing -> do - modifyPeerSession pid $ \s -> - s{ peerSessionVersion = Just v } - return $ return () - $(logDebug) $ formatPid pid ph "Done processing MVersion message" - MPing (Ping nonce) -> lift $ do - $(logDebug) $ formatPid pid ph "Processing MPing message" - -- Just reply to the Ping with a Pong message - _ <- atomicallyNodeT $ trySendMessage pid $ MPong $ Pong nonce - return () - MPong (Pong nonce) -> lift $ do - $(logDebug) $ formatPid pid ph "Processing MPong message" - atomicallyNodeT $ do - PeerSession{..} <- getPeerSession pid - -- Add the Pong response time - lift $ modifyTVar' peerSessionPings (++ [nonce]) - MHeaders h -> lift $ do - $(logDebug) $ formatPid pid ph "Processing MHeaders message" - _ <- atomicallyNodeT $ tryPutTMVarS sharedHeaders (pid, h) - return () - MInv inv -> lift $ do - $(logDebug) $ formatPid pid ph "Processing MInv message" - processInvMessage pid ph inv - MGetData (GetData inv) -> do - $(logDebug) $ formatPid pid ph "Processing MGetData message" - let txlist = filter ((== InvTx) . invType) inv - txids = nub $ map (TxHash . invHash) txlist - $(logDebug) $ formatPid pid ph $ unlines $ - "Received GetData request for transactions" - : map ((" " ++) . cs . txHashToHex) txids - -- Add the txids to the GetData request map - mapTVar <- asks sharedTxGetData - liftIO . atomically $ modifyTVar' mapTVar $ \datMap -> - let newMap = M.fromList $ map (\tid -> (tid, [(pid, ph)])) txids - in M.unionWith (\x -> nub . (x ++)) newMap datMap - MTx tx -> do - $(logDebug) $ formatPid pid ph "Processing MTx message" - PeerSession{..} <- lift . atomicallyNodeT $ getPeerSession pid - txChan <- lift $ asks sharedTxChan - get >>= \merkleM -> case merkleM of - Just (_, mTxs) -> if txHash tx `elem` mTxs - then do - $(logDebug) $ formatPid pid ph $ unwords - [ "Received merkle tx", cs $ txHashToHex $ txHash tx ] - liftIO . atomically $ - writeTBMChan peerSessionMerkleChan $ Right tx - else do - $(logDebug) $ formatPid pid ph $ unwords - [ "Received tx broadcast (ending a merkle block)" - , cs $ txHashToHex $ txHash tx - ] - endMerkle - liftIO . atomically $ writeTBMChan txChan (pid, ph, tx) - _ -> do - $(logDebug) $ formatPid pid ph $ unwords - [ "Received tx broadcast", cs $ txHashToHex $ txHash tx ] - liftIO . 
atomically $ writeTBMChan txChan (pid, ph, tx) - MMerkleBlock mb@(MerkleBlock mHead ntx hs fs) -> do - $(logDebug) $ formatPid pid ph "Processing MMerkleBlock message" - case extractMatches fs hs (fromIntegral ntx) of - Left err -> lift $ misbehaving pid ph severeDoS $ unwords - [ "Received an invalid merkle block:", err ] - Right (decodedRoot, mTxs) -> - -- Make sure that the merkle roots match - if decodedRoot == merkleRoot mHead - then do - $(logDebug) $ formatPid pid ph $ unwords - [ "Received valid merkle block" - , cs $ blockHashToHex $ headerHash mHead - ] - forM_ mTxs $ \h -> - $(logDebug) $ formatPid pid ph $ unwords - [ "Matched merkle tx:", cs $ txHashToHex h ] - if null mTxs - -- Deliver the merkle block - then lift . atomicallyNodeT $ do - PeerSession{..} <- getPeerSession pid - lift $ writeTBMChan peerSessionMerkleChan $ - Left (mb, []) - -- Buffer the merkle block until we received all txs - else put $ Just (mb, mTxs) - else lift $ misbehaving pid ph severeDoS - "Received a merkle block with an invalid merkle root" - _ -> return () -- Ignore other requests - where - checkMerkleEnd = unless (isTxMsg msg) endMerkle - endMerkle = get >>= \merkleM -> case merkleM of - Just (mb, mTxs) -> do - lift . atomicallyNodeT $ do - PeerSession{..} <- getPeerSession pid - lift $ writeTBMChan peerSessionMerkleChan $ Left (mb, mTxs) - put Nothing - _ -> return () - isTxMsg (MTx _) = True - isTxMsg _ = False - -processInvMessage :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerId - -> PeerHost - -> Inv - -> NodeT m () -processInvMessage pid ph (Inv vs) = case tickleM of - Just tickle -> do - $(logDebug) $ formatPid pid ph $ unwords - [ "Received block tickle", cs $ blockHashToHex tickle ] - tickleChan <- asks sharedTickleChan - liftIO $ atomically $ writeTBMChan tickleChan (pid, ph, tickle) - _ -> do - unless (null txlist) $ do - forM_ txlist $ \tid -> $(logDebug) $ formatPid pid ph $ unwords - [ "Received transaction INV", cs (txHashToHex tid) ] - -- We simply request the transactions. - -- TODO: Should we do something more elaborate here? - atomicallyNodeT $ sendMessage pid $ MGetData $ GetData $ - map (InvVector InvTx . getTxHash) txlist - unless (null blocklist) $ do - $(logDebug) $ formatPid pid ph $ unlines $ - "Received block INV" - : map ((" " ++) . cs . blockHashToHex) blocklist - -- We ignore block INVs as we do headers-first sync - return () - where - -- Single blockhash INV is a tickle - tickleM = case blocklist of - [h] -> if null txlist then Just h else Nothing - _ -> Nothing - txlist :: [TxHash] - txlist = map (TxHash . invHash) $ - filter ((== InvTx) . invType) vs - blocklist :: [BlockHash] - blocklist = map (BlockHash . invHash) $ filter ((== InvBlock) . invType) vs - --- | Encode message that are being sent to the remote host. -encodeMessage :: MonadLoggerIO m - => Conduit Message (NodeT m) BS.ByteString -encodeMessage = awaitForever $ yield . encode - -peerPing :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerId - -> PeerHost - -> NodeT m () -peerPing pid ph = forever $ do - $(logDebug) $ formatPid pid ph - "Waiting until the peer is available for sending pings..." 
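    -- Illustrative aside, not from the original file: the peer score updated
    -- further below is an exponential moving average of ping round-trip times
    -- with weight 1/2 (score_n = 0.5 * rtt_n + 0.5 * score_(n-1), seeded with
    -- the first rtt). For example, round trips of 0.20s, 0.40s and 0.10s
    -- yield scores of 0.20, 0.30 and 0.20 respectively.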
- atomicallyNodeT $ waitPeerAvailable pid - - nonce <- liftIO randomIO - nonceTVar <- atomicallyNodeT $ do - PeerSession{..} <- getPeerSession pid - sendMessage pid $ MPing $ Ping nonce - return peerSessionPings - - $(logDebug) $ formatPid pid ph $ unwords - [ "Waiting for Ping nonce", show nonce ] - -- Wait 120 seconds for the pong or time out - startTime <- liftIO getCurrentTime - resE <- raceTimeout 120 (killPeer nonce) (waitPong nonce nonceTVar) - case resE of - Right _ -> do - endTime <- liftIO getCurrentTime - (diff, score) <- atomicallyNodeT $ do - PeerSession{..} <- getPeerSession pid - -- Compute the ping time and the new score - let diff = diffUTCTime endTime startTime - score = 0.5 * diff + 0.5 * fromMaybe diff peerSessionScore - -- Save the score in the peer session unless the peer is busy - modifyPeerSession pid $ \s -> s{ peerSessionScore = Just score } - return (diff, score) - $(logDebug) $ formatPid pid ph $ unwords - [ "Got response to ping", show nonce - , "with time", show diff, "and score", show score - ] - _ -> return () - - -- Sleep 30 seconds before sending the next ping - liftIO $ threadDelay $ 30 * 1000000 - where - -- Wait for the Pong message of our Ping nonce to arrive - waitPong nonce nonceTVar = do - ns <- liftIO . atomically $ do - ns <- swapTVar nonceTVar [] - if null ns then retry else return ns - unless (nonce `elem` ns) $ waitPong nonce nonceTVar - killPeer nonce = do - $(logWarn) $ formatPid pid ph $ concat - [ "Did not receive a timely reply for Ping ", show nonce - , ". Reconnecting the peer." - ] - disconnectPeer pid ph - -isBloomDisabled :: Version -> Bool -isBloomDisabled ver = version ver >= 70011 && not (services ver `testBit` 2) - -peerHandshake :: (MonadLoggerIO m, MonadBaseControl IO m) - => PeerId - -> PeerHost - -> TBMChan Message - -> NodeT m () -peerHandshake pid ph chan = do - ourVer <- buildVersion - $(logDebug) $ formatPid pid ph "Sending our version message" - liftIO . atomically $ writeTBMChan chan $ MVersion ourVer - -- Wait for the peer version message to arrive - $(logDebug) $ formatPid pid ph "Waiting for the peers version message..." 
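    -- Illustrative aside, not from the original file: once the version reply
    -- arrives below, the handshake drops peers below protocol version 70001
    -- and, from 70011 on, peers without the NODE_BLOOM service bit (bit 2);
    -- see minProtocolVersion and isBloomDisabled above. With Data.Bits.testBit:
    --   testBit (5 :: Int) 2 == True    -- NODE_NETWORK + NODE_BLOOM
    --   testBit (1 :: Int) 2 == False   -- NODE_NETWORK only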
- peerVer <- atomicallyNodeT $ waitPeerVersion pid - $(logInfo) $ formatPid pid ph $ unlines - [ unwords [ "Connected to peer host" - , show $ naAddress $ addrSend peerVer - ] - , unwords [ " version :", show $ version peerVer ] - , unwords [ " subVer :", show $ userAgent peerVer ] - , unwords [ " services :", show $ services peerVer ] - , unwords [ " time :", show $ timestamp peerVer ] - , unwords [ " blocks :", show $ startHeight peerVer ] - ] - - -- Check the protocol version - go peerVer $ do - atomicallyNodeT $ do - -- Save the peers height and update the network height - modifyPeerSession pid $ \s -> - s{ peerSessionHeight = startHeight peerVer - , peerSessionConnected = True - } - updateNetworkHeight - -- Reset the reconnection timer (exponential backoff) - modifyHostSession ph $ \s -> - s{ peerHostSessionReconnect = 1 } - -- ACK the version message - lift $ writeTBMChan chan MVerAck - $(logDebug) $ formatPid pid ph "Handshake complete" - where - go ver action - | version ver < minProtocolVersion = - misbehaving pid ph severeDoS $ unwords - [ "Connected to a peer speaking protocol version" - , show $ version ver - , "but we require at least" - , show minProtocolVersion - ] - | isBloomDisabled ver = - misbehaving pid ph severeDoS "Peer does not support bloom filters" - | otherwise = action - buildVersion = do - -- TODO: Get our correct IP here - let add = NetworkAddress 1 $ SockAddrInet 0 0 - ua = VarString haskoinUserAgent - time <- floor <$> liftIO getPOSIXTime - rdmn <- liftIO randomIO -- nonce - height <- nodeBlockHeight <$> atomicallyNodeT (readTVarS sharedBestHeader) - return Version { version = 70011 - , services = 5 - , timestamp = time - , addrRecv = add - , addrSend = add - , verNonce = rdmn - , userAgent = ua - , startHeight = height - , relay = False - } - --- Wait for the version message of a peer and return it -waitPeerVersion :: PeerId -> NodeT STM Version -waitPeerVersion pid = do - PeerSession{..} <- getPeerSession pid - case peerSessionVersion of - Just ver -> return ver - _ -> lift retry - --- Delete the session of a peer and send a kill signal to the peers thread. --- Unless the peer is banned, the peer will try to reconnect. -disconnectPeer :: (MonadLoggerIO m) - => PeerId - -> PeerHost - -> NodeT m () -disconnectPeer pid ph = do - sessM <- atomicallyNodeT $ tryGetPeerSession pid - case sessM of - Just PeerSession{..} -> do - $(logDebug) $ formatPid pid ph "Killing the peer thread" - liftIO $ killThread peerSessionThreadId - _ -> return () - -{- Peer utility functions -} - ---- Wait until the given peer is not syncing headers or merkle blocks -waitPeerAvailable :: PeerId -> NodeT STM () -waitPeerAvailable pid = do - hPidM <- readTVarS sharedHeaderPeer - mPidM <- readTVarS sharedMerklePeer - when (Just pid `elem` [hPidM, mPidM]) $ lift retry - --- Wait for a non-empty bloom filter to be available -waitBloomFilter :: NodeT STM BloomFilter -waitBloomFilter = - maybe (lift retry) (return . 
fst) =<< readTVarS sharedBloomFilter - -sendBloomFilter :: BloomFilter -> Int -> NodeT STM () -sendBloomFilter bloom elems = unless (isBloomEmpty bloom) $ do - oldBloomM <- readTVarS sharedBloomFilter - let oldElems = maybe 0 snd oldBloomM - -- Only update the bloom filter if the number of elements is larger - when (elems > oldElems) $ do - writeTVarS sharedBloomFilter $ Just (bloom, elems) - sendMessageAll $ MFilterLoad $ FilterLoad bloom - --- Returns the median height of all the peers -getMedianHeight :: NodeT STM BlockHeight -getMedianHeight = do - hs <- map (peerSessionHeight . snd) <$> getConnectedPeers - let (_,ms) = splitAt (length hs `div` 2) $ sort hs - return $ fromMaybe 0 $ listToMaybe ms - --- Set the network height to the median height of all peers. -updateNetworkHeight :: NodeT STM () -updateNetworkHeight = writeTVarS sharedNetworkHeight =<< getMedianHeight - -getPeers :: NodeT STM [(PeerId, PeerSession)] -getPeers = do - peerMap <- readTVarS sharedPeerMap - lift $ mapM f $ M.assocs peerMap - where - f (pid, sess) = (,) pid <$> readTVar sess - -getConnectedPeers :: NodeT STM [(PeerId, PeerSession)] -getConnectedPeers = filter (peerSessionConnected . snd) <$> getPeers - --- Returns a peer that is connected, at the network height and --- with the best score. -getPeersAtNetHeight :: NodeT STM [(PeerId, PeerSession)] -getPeersAtNetHeight = do - -- Find the current network height - height <- readTVarS sharedNetworkHeight - getPeersAtHeight (== height) - --- Find the best peer at the given height -getPeersAtHeight :: (BlockHeight -> Bool) - -> NodeT STM [(PeerId, PeerSession)] -getPeersAtHeight cmpHeight = do - peers <- filter f <$> getPeers - -- Choose the peer with the best score - return $ sortBy s peers - where - f (_, p) = - peerSessionConnected p && -- Only connected peers - isJust (peerSessionScore p) && -- Only peers with scores - cmpHeight (peerSessionHeight p) -- Only peers at the required height - s (_,a) (_,b) = peerSessionScore a `compare` peerSessionScore b - --- Send a message to a peer only if it is connected. It returns True on --- success. -trySendMessage :: PeerId -> Message -> NodeT STM Bool -trySendMessage pid msg = do - sessM <- tryGetPeerSession pid - lift $ case sessM of - Just PeerSession{..} -> - if peerSessionConnected - then writeTBMChan peerSessionChan msg >> return True - else return False -- The peer is not yet connected - _ -> return False -- The peer does not exist - --- Send a message to a peer only if it is connected. It returns True on --- success. Throws an exception if the peer does not exist or is not connected. -sendMessage :: PeerId -> Message -> NodeT STM () -sendMessage pid msg = do - PeerSession{..} <- getPeerSession pid - if peerSessionConnected - then lift $ writeTBMChan peerSessionChan msg - else throw $ NodeExceptionPeerNotConnected $ ShowPeerId pid - --- Send a message to all connected peers. -sendMessageAll :: Message -> NodeT STM () -sendMessageAll msg = do - peerMap <- readTVarS sharedPeerMap - forM_ (M.keys peerMap) $ \pid -> trySendMessage pid msg - -getNetworkHeight :: NodeT STM BlockHeight -getNetworkHeight = readTVarS sharedNetworkHeight - -misbehaving :: (MonadLoggerIO m) - => PeerId - -> PeerHost - -> (PeerHostScore -> PeerHostScore) - -> String - -> NodeT m () -misbehaving pid ph f msg = do - sessM <- atomicallyNodeT $ do - modifyHostSession ph $ \s -> - s{ peerHostSessionScore = f $! 
peerHostSessionScore s - , peerHostSessionLog = msg : peerHostSessionLog s - } - getHostSession ph - case sessM of - Just PeerHostSession{..} -> do - $(logWarn) $ formatPid pid ph $ unlines - [ "Misbehaving peer" - , unwords [ " Score:", show peerHostSessionScore ] - , unwords [ " Reason:", msg ] - ] - when (isHostScoreBanned peerHostSessionScore) $ - disconnectPeer pid ph - _ -> return () - -{- Run header tree database action -} - --- runHeaderTree :: MonadIO m => ReaderT L.DB IO a -> NodeT m a --- runHeaderTree action = undefined - -{- Utilities -} - -raceTimeout :: (MonadIO m, MonadBaseControl IO m) - => Int - -- ^ Timeout value in seconds - -> m a - -- ^ Action to run if the main action times out - -> m b - -- ^ Action to run until the time runs out - -> m (Either a b) -raceTimeout sec cleanup action = do - resE <- race (liftIO $ threadDelay (sec * 1000000)) action - case resE of - Right res -> return $ Right res - Left _ -> fmap Left cleanup - -formatPid :: PeerId -> PeerHost -> String -> Text -formatPid pid ph str = pack $ concat - [ "[Peer ", show $ hashUnique pid - , " | ", peerHostString ph, "] ", str - ] - diff --git a/haskoin-node/src/Network/Haskoin/Node/STM.hs b/haskoin-node/src/Network/Haskoin/Node/STM.hs deleted file mode 100644 index 74a45045..00000000 --- a/haskoin-node/src/Network/Haskoin/Node/STM.hs +++ /dev/null @@ -1,412 +0,0 @@ -{-# LANGUAGE KindSignatures #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE TemplateHaskell #-} -module Network.Haskoin.Node.STM where - -import Control.Concurrent (ThreadId) -import Control.Concurrent.STM (STM, TMVar, TVar, atomically, - isEmptyTMVar, modifyTVar', - newEmptyTMVarIO, newTVar, - newTVarIO, orElse, putTMVar, - readTMVar, readTVar, - takeTMVar, tryPutTMVar, - tryReadTMVar, writeTVar) -import Control.Concurrent.STM.Lock (Lock) -import qualified Control.Concurrent.STM.Lock as Lock (new) -import Control.Concurrent.STM.TBMChan (TBMChan, closeTBMChan, - newTBMChan) -import Control.DeepSeq (NFData (..)) -import Control.Exception.Lifted (Exception, SomeException, - catch, fromException, throw) -import Control.Monad ((<=<)) -import Control.Monad.Logger (MonadLoggerIO, logDebug) -import Control.Monad.Reader (ReaderT, ask, asks, - runReaderT) -import Control.Monad.Trans (MonadIO, lift, liftIO) -import Control.Monad.Trans.Control (MonadBaseControl) -import Data.Aeson.TH (deriveJSON) -import qualified Data.Map.Strict as M (Map, delete, empty, - insert, lookup) -import Data.Maybe (isJust) -import Data.Time.Clock (NominalDiffTime) -import Data.Typeable (Typeable) -import Data.Unique (Unique, hashUnique) -import Data.Word (Word32, Word64) -import Database.Persist.Sql (ConnectionPool, SqlBackend, - SqlPersistT, runSqlConn, - runSqlPool) -import Network.Haskoin.Block -import Network.Haskoin.Network -import Network.Haskoin.Node.HeaderTree -import Network.Haskoin.Transaction -import Network.Haskoin.Util - -{- Type aliases -} - -type MerkleTxs = [TxHash] -type NodeT = ReaderT SharedNodeState -type PeerId = Unique -type PeerHostScore = Word32 - -newtype ShowPeerId = ShowPeerId { getShowPeerId :: PeerId } - deriving (Eq) - -instance Show ShowPeerId where - show = show . hashUnique . getShowPeerId - -runSql :: (MonadBaseControl IO m) - => SqlPersistT m a - -> Either SqlBackend ConnectionPool - -> m a -runSql f (Left conn) = runSqlConn f conn -runSql f (Right pool) = runSqlPool f pool - -runSqlNodeT :: (MonadBaseControl IO m) => SqlPersistT m a -> NodeT m a -runSqlNodeT f = asks sharedSqlBackend >>= lift . 
runSql f - -getNodeState :: (MonadLoggerIO m, MonadBaseControl IO m) - => Either SqlBackend ConnectionPool - -> m SharedNodeState -getNodeState sharedSqlBackend = do - -- Initialize the HeaderTree - $(logDebug) "Initializing the HeaderTree and NodeState" - best <- runSql (initHeaderTree >> getBestBlock) sharedSqlBackend - liftIO $ do - sharedPeerMap <- newTVarIO M.empty - sharedHostMap <- newTVarIO M.empty - sharedNetworkHeight <- newTVarIO 0 - sharedHeaders <- newEmptyTMVarIO - sharedHeaderPeer <- newTVarIO Nothing - sharedMerklePeer <- newTVarIO Nothing - sharedSyncLock <- atomically Lock.new - sharedTickleChan <- atomically $ newTBMChan 1024 - sharedTxChan <- atomically $ newTBMChan 1024 - sharedTxGetData <- newTVarIO M.empty - sharedRescan <- newEmptyTMVarIO - sharedMempool <- newTVarIO False - sharedBloomFilter <- newTVarIO Nothing - -- Find our best node in the HeaderTree - sharedBestHeader <- newTVarIO best - sharedBestBlock <- newTVarIO nGenesisBlock - return SharedNodeState{..} - -runNodeT :: Monad m => NodeT m a -> SharedNodeState -> m a -runNodeT = runReaderT - -withNodeT :: (MonadLoggerIO m, MonadBaseControl IO m) - => NodeT m a - -> Either SqlBackend ConnectionPool - -> m a -withNodeT action sql = runNodeT action =<< getNodeState sql - -atomicallyNodeT :: MonadIO m => NodeT STM a -> NodeT m a -atomicallyNodeT action = liftIO . atomically . runReaderT action =<< ask - -{- PeerHost Session -} - -data PeerHostSession = PeerHostSession - { peerHostSessionScore :: !PeerHostScore - , peerHostSessionReconnect :: !Int - , peerHostSessionLog :: ![String] - -- ^ Important host log messages that should appear in status command - } - -instance NFData PeerHostSession where - rnf PeerHostSession{..} = - rnf peerHostSessionScore `seq` - rnf peerHostSessionReconnect `seq` - rnf peerHostSessionLog - -{- Shared Peer STM Type -} - -data SharedNodeState = SharedNodeState - { sharedPeerMap :: !(TVar (M.Map PeerId (TVar PeerSession))) - -- ^ Map of all active peers and their sessions - , sharedHostMap :: !(TVar (M.Map PeerHost (TVar PeerHostSession))) - -- ^ The peer that is currently syncing the block headers - , sharedNetworkHeight :: !(TVar BlockHeight) - -- ^ The current height of the network - , sharedHeaders :: !(TMVar (PeerId, Headers)) - -- ^ Block headers sent from a peer - , sharedHeaderPeer :: !(TVar (Maybe PeerId)) - -- ^ Peer currently syncing headers - , sharedMerklePeer :: !(TVar (Maybe PeerId)) - -- ^ Peer currently downloading merkle blocks - , sharedSyncLock :: !Lock - -- ^ Lock on the header syncing process - , sharedBestHeader :: !(TVar NodeBlock) - -- ^ Our best block header - , sharedBestBlock :: !(TVar NodeBlock) - -- ^ Our best merkle block's height - , sharedTxGetData :: !(TVar (M.Map TxHash [(PeerId, PeerHost)])) - -- ^ List of Tx GetData requests - , sharedBloomFilter :: !(TVar (Maybe (BloomFilter, Int))) - -- ^ Bloom filter - , sharedTickleChan :: !(TBMChan (PeerId, PeerHost, BlockHash)) - -- ^ Channel containing all the block tickles received from peers - , sharedTxChan :: !(TBMChan (PeerId, PeerHost, Tx)) - -- ^ Transaction channel - , sharedRescan :: !(TMVar (Either Timestamp BlockHeight)) - -- ^ Rescan requests from a timestamp or from a block height - , sharedMempool :: !(TVar Bool) - -- ^ Did we do a Mempool sync ? 
- , sharedSqlBackend :: !(Either SqlBackend ConnectionPool) - } - -{- Peer Data -} - -type PingNonce = Word64 - --- Data stored about a peer -data PeerSession = PeerSession - { peerSessionConnected :: !Bool - -- ^ True if the peer is connected (completed the handshake) - , peerSessionVersion :: !(Maybe Version) - -- ^ Contains the version message that we received from the peer - , peerSessionHeight :: !BlockHeight - -- ^ Current known height of the peer - , peerSessionChan :: !(TBMChan Message) - -- ^ Message channel to send messages to the peer - , peerSessionHost :: !PeerHost - -- ^ Host to which this peer is connected - , peerSessionThreadId :: !ThreadId - -- ^ Peer ThreadId - , peerSessionMerkleChan :: !(TBMChan (Either (MerkleBlock, MerkleTxs) Tx)) - -- ^ Merkle block/Merkle transaction channel - , peerSessionPings :: !(TVar [PingNonce]) - -- ^ Time at which we requested pings - , peerSessionScore :: !(Maybe NominalDiffTime) - -- ^ Ping scores for this peer (round trip times) - } - -instance NFData PeerSession where - rnf PeerSession{..} = - rnf peerSessionConnected `seq` - rnf peerSessionVersion `seq` - rnf peerSessionHeight `seq` - peerSessionChan `seq` - rnf peerSessionHost `seq` - peerSessionThreadId `seq` () - -{- Peer Hosts -} - -data PeerHost = PeerHost - { peerHost :: !String - , peerPort :: !Int - } - deriving (Eq, Ord) - -$(deriveJSON (dropFieldLabel 4) ''PeerHost) - -peerHostString :: PeerHost -> String -peerHostString PeerHost{..} = concat [ peerHost, ":", show peerPort ] - -instance NFData PeerHost where - rnf PeerHost{..} = - rnf peerHost `seq` - rnf peerPort - -{- Node Status -} - -data PeerStatus = PeerStatus - -- Regular fields - { peerStatusPeerId :: !Int - , peerStatusHost :: !PeerHost - , peerStatusConnected :: !Bool - , peerStatusHeight :: !BlockHeight - , peerStatusProtocol :: !(Maybe Word32) - , peerStatusUserAgent :: !(Maybe String) - , peerStatusPing :: !(Maybe String) - , peerStatusDoSScore :: !(Maybe PeerHostScore) - -- Debug fields - , peerStatusHaveMerkles :: !Bool - , peerStatusHaveMessage :: !Bool - , peerStatusPingNonces :: ![PingNonce] - , peerStatusReconnectTimer :: !(Maybe Int) - , peerStatusLog :: !(Maybe [String]) - } - -$(deriveJSON (dropFieldLabel 10) ''PeerStatus) - -data NodeStatus = NodeStatus - -- Regular fields - { nodeStatusPeers :: ![PeerStatus] - , nodeStatusNetworkHeight :: !BlockHeight - , nodeStatusBestHeader :: !BlockHash - , nodeStatusBestHeaderHeight :: !BlockHeight - , nodeStatusBestBlock :: !BlockHash - , nodeStatusBestBlockHeight :: !BlockHeight - , nodeStatusBloomSize :: !Int - -- Debug fields - , nodeStatusHeaderPeer :: !(Maybe Int) - , nodeStatusMerklePeer :: !(Maybe Int) - , nodeStatusHaveHeaders :: !Bool - , nodeStatusHaveTickles :: !Bool - , nodeStatusHaveTxs :: !Bool - , nodeStatusGetData :: ![TxHash] - , nodeStatusRescan :: !(Maybe (Either Timestamp BlockHeight)) - , nodeStatusMempool :: !Bool - , nodeStatusSyncLock :: !Bool - } - -$(deriveJSON (dropFieldLabel 10) ''NodeStatus) - -{- Getters / Setters -} - -tryGetPeerSession :: PeerId -> NodeT STM (Maybe PeerSession) -tryGetPeerSession pid = do - peerMap <- readTVarS sharedPeerMap - case M.lookup pid peerMap of - Just sessTVar -> fmap Just $ lift $ readTVar sessTVar - _ -> return Nothing - -getPeerSession :: PeerId -> NodeT STM PeerSession -getPeerSession pid = do - sessM <- tryGetPeerSession pid - case sessM of - Just sess -> return sess - _ -> throw $ NodeExceptionInvalidPeer $ ShowPeerId pid - -newPeerSession :: PeerId -> PeerSession -> NodeT STM () -newPeerSession pid 
sess = do - peerMapTVar <- asks sharedPeerMap - peerMap <- lift $ readTVar peerMapTVar - case M.lookup pid peerMap of - Just _ -> return () - Nothing -> do - sessTVar <- lift $ newTVar sess - let newMap = M.insert pid sessTVar peerMap - lift $ writeTVar peerMapTVar $! newMap - -modifyPeerSession :: PeerId -> (PeerSession -> PeerSession) -> NodeT STM () -modifyPeerSession pid f = do - peerMap <- readTVarS sharedPeerMap - case M.lookup pid peerMap of - Just sessTVar -> lift $ modifyTVar' sessTVar f - _ -> return () - -removePeerSession :: PeerId -> NodeT STM (Maybe PeerSession) -removePeerSession pid = do - peerMapTVar <- asks sharedPeerMap - peerMap <- lift $ readTVar peerMapTVar - -- Close the peer TBMChan - sessM <- case M.lookup pid peerMap of - Just sessTVar -> lift $ do - sess@PeerSession{..} <- readTVar sessTVar - closeTBMChan peerSessionChan - return $ Just sess - _ -> return Nothing - -- Remove the peer from the peerMap - let newMap = M.delete pid peerMap - lift $ writeTVar peerMapTVar $! newMap - return sessM - -getHostSession :: PeerHost - -> NodeT STM (Maybe PeerHostSession) -getHostSession ph = do - hostMap <- readTVarS sharedHostMap - lift $ case M.lookup ph hostMap of - Just hostSessionTVar -> Just <$> readTVar hostSessionTVar - _ -> return Nothing - -modifyHostSession :: PeerHost - -> (PeerHostSession -> PeerHostSession) - -> NodeT STM () -modifyHostSession ph f = do - hostMap <- readTVarS sharedHostMap - case M.lookup ph hostMap of - Just hostSessionTVar -> lift $ modifyTVar' hostSessionTVar f - _ -> newHostSession ph $! - f PeerHostSession { peerHostSessionScore = 0 - , peerHostSessionReconnect = 1 - , peerHostSessionLog = [] - } - -newHostSession :: PeerHost -> PeerHostSession -> NodeT STM () -newHostSession ph session = do - hostMapTVar <- asks sharedHostMap - hostMap <- lift $ readTVar hostMapTVar - case M.lookup ph hostMap of - Just _ -> return () - Nothing -> lift $ do - hostSessionTVar <- newTVar session - let newHostMap = M.insert ph hostSessionTVar hostMap - writeTVar hostMapTVar $! newHostMap - -{- Host DOS Scores -} - -bannedScore :: PeerHostScore -bannedScore = 100 - -minorDoS :: PeerHostScore -> PeerHostScore -minorDoS = (+ 1) - -moderateDoS :: PeerHostScore -> PeerHostScore -moderateDoS = (+ 10) - -severeDoS :: PeerHostScore -> PeerHostScore -severeDoS = (+ bannedScore) - -isHostScoreBanned :: PeerHostScore -> Bool -isHostScoreBanned = (>= bannedScore) - -{- STM Utilities -} - -orElseNodeT :: NodeT STM a -> NodeT STM a -> NodeT STM a -orElseNodeT a b = do - s <- ask - lift $ runNodeT a s `orElse` runNodeT b s - -{- TVar Utilities -} - -readTVarS :: (SharedNodeState -> TVar a) -> NodeT STM a -readTVarS = lift . readTVar <=< asks - -writeTVarS :: (SharedNodeState -> TVar a) -> a -> NodeT STM () -writeTVarS f val = lift . flip writeTVar val =<< asks f - -{- TMVar Utilities -} - -takeTMVarS :: (SharedNodeState -> TMVar a) -> NodeT STM a -takeTMVarS = lift . takeTMVar <=< asks - -readTMVarS :: (SharedNodeState -> TMVar a) -> NodeT STM a -readTMVarS = lift . readTMVar <=< asks - -tryReadTMVarS :: (SharedNodeState -> TMVar a) -> NodeT STM (Maybe a) -tryReadTMVarS = lift . tryReadTMVar <=< asks - -putTMVarS :: (SharedNodeState -> TMVar a) -> a -> NodeT STM () -putTMVarS f val = lift . flip putTMVar val =<< asks f - -tryPutTMVarS :: (SharedNodeState -> TMVar a) -> a -> NodeT STM Bool -tryPutTMVarS f val = lift . flip tryPutTMVar val =<< asks f - -swapTMVarS :: (SharedNodeState -> TMVar a) -> a -> NodeT STM () -swapTMVarS f val = lift . 
flip putTMVar val =<< asks f - -isEmptyTMVarS :: (SharedNodeState -> TMVar a) -> NodeT STM Bool -isEmptyTMVarS f = lift . isEmptyTMVar =<< asks f - -data NodeException - = NodeExceptionBanned - | NodeExceptionConnected - | NodeExceptionInvalidPeer !ShowPeerId - | NodeExceptionPeerNotConnected !ShowPeerId - | NodeException !String - deriving (Show, Typeable) - -instance Exception NodeException - -isNodeException :: SomeException -> Bool -isNodeException se = isJust (fromException se :: Maybe NodeException) - -catchAny :: MonadBaseControl IO m - => m a -> (SomeException -> m a) -> m a -catchAny = catch - -catchAny_ :: MonadBaseControl IO m - => m () -> m () -catchAny_ = flip catchAny $ \_ -> return () - diff --git a/haskoin-node/test/Main.hs b/haskoin-node/test/Main.hs deleted file mode 100644 index f2b4d927..00000000 --- a/haskoin-node/test/Main.hs +++ /dev/null @@ -1,16 +0,0 @@ -module Main where - -import Test.Framework (defaultMain) - -import Network.Haskoin.Constants -import qualified Network.Haskoin.Node.Tests (tests) -import qualified Network.Haskoin.Node.Units (tests) - -main :: IO () -main = do - setProdnet - defaultMain - ( Network.Haskoin.Node.Tests.tests - ++ Network.Haskoin.Node.Units.tests - ) - diff --git a/haskoin-node/test/Network/Haskoin/Node/Tests.hs b/haskoin-node/test/Network/Haskoin/Node/Tests.hs deleted file mode 100644 index 2173de44..00000000 --- a/haskoin-node/test/Network/Haskoin/Node/Tests.hs +++ /dev/null @@ -1,12 +0,0 @@ -module Network.Haskoin.Node.Tests (tests) where - -import Test.Framework (Test, testGroup) --- import Test.Framework.Providers.QuickCheck2 (testProperty) - -tests :: [Test] -tests = - [ testGroup "Serialize & de-serialize haskoin node types to JSON" - [ - ] - ] - diff --git a/haskoin-node/test/Network/Haskoin/Node/Units.hs b/haskoin-node/test/Network/Haskoin/Node/Units.hs deleted file mode 100644 index 7e2a3d37..00000000 --- a/haskoin-node/test/Network/Haskoin/Node/Units.hs +++ /dev/null @@ -1,252 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} -module Network.Haskoin.Node.Units where -import Control.Monad (forM_, when) -import Control.Monad.Logger (NoLoggingT) -import Control.Monad.Trans (MonadIO, liftIO) -import Control.Monad.Trans.Resource (ResourceT) -import Data.Maybe (fromJust, isNothing, - maybeToList) -import Data.Word (Word32) -import Database.Persist.Sqlite (SqlPersistT, - runMigrationSilent, runSqlite) -import Network.Haskoin.Block -import Network.Haskoin.Constants -import Network.Haskoin.Node.HeaderTree -import Test.Framework (Test, testGroup) -import Test.Framework.Providers.HUnit (testCase) -import Test.HUnit (Assertion, assertBool, - assertEqual, assertFailure) - --- TODO: Make sure that evalNewChain for a partially overlapping best chain --- properly evaluates to BestChain. 
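
The readTVarS/writeTVarS helpers removed earlier in this patch follow a compact pattern: a field selector picks a TVar out of the shared state record, and the STM action is lifted into the surrounding ReaderT. A self-contained, hypothetical reconstruction of that pattern (assumes the stm, mtl and transformers packages; none of these names are from the original code):

import Control.Concurrent.STM (STM, TVar, atomically, newTVarIO, readTVar)
import Control.Monad ((<=<))
import Control.Monad.Reader (ReaderT, asks, runReaderT)
import Control.Monad.Trans.Class (lift)

-- A toy stand-in for SharedNodeState.
data Env = Env { envCounter :: TVar Int }

-- Mirrors readTVarS: select a TVar from the environment, then read it in STM.
readField :: (Env -> TVar a) -> ReaderT Env STM a
readField = lift . readTVar <=< asks

main :: IO ()
main = do
    tv <- newTVarIO (42 :: Int)
    n  <- atomically (runReaderT (readField envCounter) (Env tv))
    print n  -- prints 42
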
- -type App = SqlPersistT (NoLoggingT (ResourceT IO)) - -tests :: [Test] -tests = - [ testGroup "Header Tree" - [ testCase "Initalization successful" $ runUnit initialize - , testCase "Add second block" $ runUnit addSecondBlock - , testCase "Blockchain head correct" $ runUnit blockChainHead - , testCase "Find fork node" $ runUnit forkNode - , testCase "Find fork node (non-head)" $ runUnit forkNodeNonHead - , testCase "Find fork node (same chain)" $ runUnit forkNodeSameChain - , testCase "Get best chain" $ runUnit getBestChain - , testCase "Get side chain" $ runUnit getSideChain - , testCase "Nodes at height" $ runUnit getNodesHeight - , testCase "Block locator to head" $ runUnit bLocatorToHead - , testCase "Block locator to non-head" $ runUnit bLocatorToNode - , testCase "Find split node" $ runUnit splitNode - ] - ] - -initialize :: App () -initialize = do - initHeaderTree - bM <- getBlockByHash (headerHash genesisHeader) - liftIO $ assertEqual "Genesis node in header tree" (Just nGenesisBlock) bM - hs <- getHeads - liftIO $ assertEqual "Genesis node is only head" [nGenesisBlock] hs - bh <- getBestBlock - liftIO $ assertEqual "Genesis node matches best header" nGenesisBlock bh - -addSecondBlock :: App () -addSecondBlock = do - initHeaderTree - let block = head chain0 - liftIO $ assertEqual "Block builds on genesis block" - (headerHash genesisHeader) - (nodePrev block) - putBlock block - block' <- getBlockByHash $ nodeHash block - liftIO $ assertEqual "Block can be retrieved" (Just block) block' - -blockChainHead :: App () -blockChainHead = mockBlockChain >> do - heads <- getHeads - liftIO $ assertEqual "Heads match" - [last chain0, last chain1, last chain2, last chain3] - heads - bh <- getBestBlock - liftIO $ assertEqual "Best block has correct hash" - (nodeHash $ last chain3) (nodeHash bh) - liftIO $ assertEqual "Best block height is right" - (nodeBlockHeight $ last chain3) (nodeBlockHeight bh) - -forkNode :: App () -forkNode = mockBlockChain >> do - let l = last chain2 - r = last chain3 - bn <- splitBlock l r - - liftIO $ assertEqual "Split block are correct" - (chain0 !! 1) bn - - commonLM <- getBlockByHeight l $ nodeBlockHeight bn - when (isNothing commonLM) $ liftIO $ - assertFailure "Could not find fork on left side" - let commonL = fromJust commonLM - - commonRM <- getBlockByHeight r $ nodeBlockHeight bn - when (isNothing commonRM) $ liftIO $ - assertFailure "Could not find fork on right side" - let commonR = fromJust commonRM - - firstLM <- getBlockByHeight l (nodeBlockHeight bn + 1) - when (isNothing firstLM) $ liftIO $ - assertFailure "Could not find fork child on left side" - let firstL = fromJust firstLM - - firstRM <- getBlockByHeight r (nodeBlockHeight bn + 1) - when (isNothing firstLM) $ liftIO $ - assertFailure "Could not find fork child on right side" - let firstR = fromJust firstRM - - liftIO $ assertEqual "Fork node is same in both sides" commonL commonR - liftIO $ assertEqual "Fork node connect with left side" - (nodeHash commonL) - (nodePrev firstL) - liftIO $ assertEqual "Fork node connect with right side" - (nodeHash commonR) - (nodePrev firstR) - liftIO $ assertBool "After-fork chains diverge" $ firstL /= firstR - liftIO $ assertEqual "Fork node matches hardcoded one" - (chain0 !! 1) commonL - -forkNodeNonHead :: App () -forkNodeNonHead = mockBlockChain >> do - let l = chain2 !! 1 - r = chain1 !! 1 - height <- nodeBlockHeight <$> splitBlock l r - splitM <- getBlockByHeight l height - liftIO $ assertEqual "Fork node is correct" (Just $ chain1 !! 
1) splitM - -forkNodeSameChain :: App () -forkNodeSameChain = mockBlockChain >> do - let l = chain3 !! 5 - r = chain3 !! 3 - height <- nodeBlockHeight <$> splitBlock l r - splitM <- getBlockByHeight r height - liftIO $ assertEqual "Fork node is correct" (Just $ chain3 !! 3) splitM - -getBestChain :: App () -getBestChain = mockBlockChain >> do - h <- getBestBlock - ch <- getBlocksFromHeight h 0 0 - liftIO $ assertEqual "Best chain correct" bch ch - where - bch = nGenesisBlock : take 2 chain0 ++ chain3 - -getSideChain :: App () -getSideChain = mockBlockChain >> do - ch <- getBlocksFromHeight (chain2 !! 1) 0 0 - liftIO $ assertEqual "Side chain correct" sch ch - where - sch = nGenesisBlock : - take 3 chain0 ++ take 2 chain1 ++ take 2 chain2 - -getNodesHeight :: App () -getNodesHeight = mockBlockChain >> do - ns <- getBlocksAtHeight 3 - liftIO $ assertEqual "Nodes at height match" hns ns - where - hns = [chain0 !! 2, head chain3] - -bLocatorToHead :: App () -bLocatorToHead = do - mockBlockChain - putBlocks bs - h <- getBestBlock - liftIO $ assertEqual "Head matches" (last bs) h - ls <- bLocator h - liftIO $ assertEqual "Last is genesis" - (last ls) - (headerHash genesisHeader) - liftIO $ assertEqual "First is current head" - (head ls) - (nodeHash h) - last10 <- map nodeHash . reverse <$> - getBlocksFromHeight h 0 (nodeBlockHeight h - 9) - liftIO $ assertEqual "Last ten blocks contiguous" - last10 - (take 10 ls) - let h10 = nodeBlockHeight h - 10 - bhs <- map (nodeHash . fromJust) <$> - mapM (getBlockByHeight h) - [h10, h10 - 2, h10 - 6, h10 - 14, h10 - 30, h10 - 62] - liftIO $ assertEqual "All block hashes correct" - (last10 ++ bhs ++ [headerHash genesisHeader]) - ls - where - bs = manyBlocks $ last chain1 - -bLocatorToNode :: App () -bLocatorToNode = do - mockBlockChain - putBlocks bs - n <- fromJust <$> getBlockByHash (nodeHash $ chain3 !! 4) - ls <- bLocator n - xs <- map nodeHash . reverse <$> - getBlocksFromHeight n 0 0 - liftIO $ assertEqual "Block locator for non-head node is correct" xs ls - where - bs = manyBlocks $ last chain1 - -splitNode :: App () -splitNode = do - mockBlockChain - (split, ls, rs) <- splitChains (last chain2, 0) (last chain3, 0) - liftIO $ assertEqual "Split node correct" (chain0 !! 1) split - liftIO $ assertEqual "Left correct" - ([chain0 !! 2] ++ take 2 chain1 ++ chain2) - ls - liftIO $ assertEqual "Right correct" chain3 rs - -runUnit :: App () -> Assertion -runUnit action = runSqlite ":memory:" $ do - _ <- runMigrationSilent migrateHeaderTree - action - -mockBlockChain :: MonadIO m => SqlPersistT m () -mockBlockChain = do - initHeaderTree - forM_ (concat [chain0, chain1, chain2, chain3]) putBlock - -manyBlocks :: NodeBlock -> [NodeBlock] -manyBlocks b = - tail $ reverse $ foldBlock (Just b) $ zip [18..117] (repeat 4) - - -chain0 :: [NodeBlock] -chain0 = - tail $ reverse $ foldBlock Nothing $ zip [1..4] (repeat 0) - -chain1 :: [NodeBlock] -chain1 = - tail $ reverse $ foldBlock (Just $ chain0 !! 2) $ zip [5..7] (repeat 1) - -chain2 :: [NodeBlock] -chain2 = - tail $ reverse $ foldBlock (Just $ chain1 !! 1) $ zip [8..10] (repeat 2) - -chain3 :: [NodeBlock] -chain3 = - tail $ reverse $ foldBlock (Just $ chain0 !! 
1) $ zip [11..17] (repeat 3) - -foldBlock :: Maybe NodeBlock -> [(Word32, Word32)] -> [NodeBlock] -foldBlock nM = - foldl f (maybeToList nM) - where - f [] _ = [nGenesisBlock] - f ls@(l:_) (n, chain) = mockBlock l chain n : ls - -mockBlock :: NodeBlock -> Word32 -> Word32 -> NodeBlock -mockBlock parent chain n = nodeBlock parent chain bh - where - bh = BlockHeader - (blockVersion $ nHeader parent) - (nodeHash parent) - "0000000000000000000000000000000000000000000000000000000000000000" - (nodeTimestamp parent + 600) - (blockBits $ nHeader parent) - n diff --git a/haskoin-wallet/.stylish-haskell.yaml b/haskoin-wallet/.stylish-haskell.yaml deleted file mode 100644 index 97b2231e..00000000 --- a/haskoin-wallet/.stylish-haskell.yaml +++ /dev/null @@ -1,169 +0,0 @@ -# stylish-haskell configuration file -# ================================== - -# The stylish-haskell tool is mainly configured by specifying steps. These steps -# are a list, so they have an order, and one specific step may appear more than -# once (if needed). Each file is processed by these steps in the given order. -steps: - # Convert some ASCII sequences to their Unicode equivalents. This is disabled - # by default. - # - unicode_syntax: - # # In order to make this work, we also need to insert the UnicodeSyntax - # # language pragma. If this flag is set to true, we insert it when it's - # # not already present. You may want to disable it if you configure - # # language extensions using some other method than pragmas. Default: - # # true. - # add_language_pragma: true - - # Align the right hand side of some elements. This is quite conservative - # and only applies to statements where each element occupies a single - # line. - - simple_align: - cases: true - top_level_patterns: true - records: true - - # Import cleanup - - imports: - # There are different ways we can align names and lists. - # - # - global: Align the import names and import list throughout the entire - # file. - # - # - file: Like global, but don't add padding when there are no qualified - # imports in the file. - # - # - group: Only align the imports per group (a group is formed by adjacent - # import lines). - # - # - none: Do not perform any alignment. - # - # Default: global. - align: global - - # Folowing options affect only import list alignment. - # - # List align has following options: - # - # - after_alias: Import list is aligned with end of import including - # 'as' and 'hiding' keywords. - # - # > import qualified Data.List as List (concat, foldl, foldr, head, - # > init, last, length) - # - # - with_alias: Import list is aligned with start of alias or hiding. - # - # > import qualified Data.List as List (concat, foldl, foldr, head, - # > init, last, length) - # - # - new_line: Import list starts always on new line. - # - # > import qualified Data.List as List - # > (concat, foldl, foldr, head, init, last, length) - # - # Default: after alias - list_align: after_alias - - # Long list align style takes effect when import is too long. This is - # determined by 'columns' setting. - # - # - inline: This option will put as much specs on same line as possible. - # - # - new_line: Import list will start on new line. - # - # - new_line_multiline: Import list will start on new line when it's - # short enough to fit to single line. Otherwise it'll be multiline. - # - # - multiline: One line per import list entry. - # Type with contructor list acts like single import. - # - # > import qualified Data.Map as M - # > ( empty - # > , singleton - # > , ... 
- # > , delete - # > ) - # - # Default: inline - long_list_align: inline - - # List padding determines indentation of import list on lines after import. - # This option affects 'list_align' and 'long_list_align'. - list_padding: 4 - - # Separate lists option affects formating of import list for type - # or class. The only difference is single space between type and list - # of constructors, selectors and class functions. - # - # - true: There is single space between Foldable type and list of it's - # functions. - # - # > import Data.Foldable (Foldable (fold, foldl, foldMap)) - # - # - false: There is no space between Foldable type and list of it's - # functions. - # - # > import Data.Foldable (Foldable(fold, foldl, foldMap)) - # - # Default: true - separate_lists: true - - # Language pragmas - - language_pragmas: - # We can generate different styles of language pragma lists. - # - # - vertical: Vertical-spaced language pragmas, one per line. - # - # - compact: A more compact style. - # - # - compact_line: Similar to compact, but wrap each line with - # `{-#LANGUAGE #-}'. - # - # Default: vertical. - style: vertical - - # Align affects alignment of closing pragma brackets. - # - # - true: Brackets are aligned in same collumn. - # - # - false: Brackets are not aligned together. There is only one space - # between actual import and closing bracket. - # - # Default: true - align: true - - # stylish-haskell can detect redundancy of some language pragmas. If this - # is set to true, it will remove those redundant pragmas. Default: true. - remove_redundant: true - - # Replace tabs by spaces. This is disabled by default. - # - tabs: - # # Number of spaces to use for each tab. Default: 8, as specified by the - # # Haskell report. - # spaces: 8 - - # Remove trailing whitespace - - trailing_whitespace: {} - -# A common setting is the number of columns (parts of) code will be wrapped -# to. Different steps take this into account. Default: 80. -columns: 80 - -# Sometimes, language extensions are specified in a cabal file or from the -# command line instead of using language pragmas in the file. stylish-haskell -# needs to be aware of these, so it can parse the file correctly. -# -# No language extensions are enabled by default. 
-language_extensions: - - DeriveDataTypeable - - EmptyDataDecls - - FlexibleContexts - - FlexibleInstances - - GADTs - - GeneralizedNewtypeDeriving - - MultiParamTypeClasses - - OverloadedStrings - - QuasiQuotes - - RecordWildCards - - TemplateHaskell - - TypeFamilies diff --git a/haskoin-wallet/Dockerfile b/haskoin-wallet/Dockerfile deleted file mode 100644 index 861a5c16..00000000 --- a/haskoin-wallet/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Jean-Pierre Rupp - -RUN apt-get update -RUN apt-get install -y \ - git \ - wget \ - libleveldb-dev \ - libzmq3-dev \ - libsnappy-dev \ - pkg-config \ - zlib1g-dev -RUN wget -q -O- \ - https://s3.amazonaws.com/download.fpcomplete.com/ubuntu/fpco.key \ - | apt-key add - -RUN echo 'deb http://download.fpcomplete.com/ubuntu/trusty stable main' \ - > /etc/apt/sources.list.d/fpco.list -RUN apt-get update && apt-get install -y stack -WORKDIR /usr/local/src -RUN git clone https://github.com/haskoin/haskoin-wallet.git -WORKDIR /usr/local/src/haskoin-wallet -RUN stack setup -RUN stack install -RUN mv /root/.local/bin/hw /usr/local/bin -RUN rm -rf /root/.stack /root/.local -WORKDIR /root -CMD [ "/usr/local/bin/hw" ] diff --git a/haskoin-wallet/Setup.hs b/haskoin-wallet/Setup.hs deleted file mode 100644 index 9a994af6..00000000 --- a/haskoin-wallet/Setup.hs +++ /dev/null @@ -1,2 +0,0 @@ -import Distribution.Simple -main = defaultMain diff --git a/haskoin-wallet/UNLICENSE b/haskoin-wallet/UNLICENSE deleted file mode 100644 index 68a49daa..00000000 --- a/haskoin-wallet/UNLICENSE +++ /dev/null @@ -1,24 +0,0 @@ -This is free and unencumbered software released into the public domain. - -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. - -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -For more information, please refer to diff --git a/haskoin-wallet/app/Main.hs b/haskoin-wallet/app/Main.hs deleted file mode 100644 index c1397cf1..00000000 --- a/haskoin-wallet/app/Main.hs +++ /dev/null @@ -1,7 +0,0 @@ -module Main where - -import Network.Haskoin.Wallet.Client - -main :: IO () -main = clientMain - diff --git a/haskoin-wallet/config/config.yml b/haskoin-wallet/config/config.yml deleted file mode 100644 index ad95df15..00000000 --- a/haskoin-wallet/config/config.yml +++ /dev/null @@ -1,123 +0,0 @@ -# ZeroMQ socket on which to listen to. Either absolute path or relative to -# work-dir/network. -bind-socket: ipc://hw.sock -bind-socket-notif: ipc://notif.sock - -# ZeroMQ socket to communicate with the server. 
-# Either absolute path or relative to the server work-dir/network path. -connect-uri: ipc://hw.sock -connect-uri-notif: ipc://notif.sock - -# Server mode. Can be either online or offline. In offline mode, the SPV -# daemon does not start and only the local wallet is available to query. -server-mode: online - -# False positive rate for the bloom filters. -bloom-false-positive: 0.00001 - -# Database connection information. -database: - testnet5: - database: hw-wallet.sqlite3 - poolsize: 1 - prodnet: - database: hw-wallet.sqlite3 - poolsize: 1 - -# List of trusted bitcoin full-nodes to connect to. -bitcoin-full-nodes: - testnet5: - - host: seed.testnet5.b-pay.net - port: 18555 - - host: bitcoin-testnet.bloqseeds.net - port: 18555 - prodnet: - - host: seed.mainnet.b-pay.net - port: 8333 - - host: seed.ob1.io - port: 8333 - - host: seed.blockchain.info - port: 8333 - - host: bitcoin.bloqseeds.net - port: 8333 - - host: seed.bitcoin.sipa.be - port: 8333 - - host: dnsseed.bluematt.me - port: 8333 - - host: dnsseed.bitcoin.dashjr.org - port: 8333 - - host: seed.bitcoinstats.com - port: 8333 - - host: seed.bitcoin.jonasschnelli.ch - port: 8333 - - -# Log file name. Either absolute path or relative to work-dir/network -log-file: hw.log - -# PID file name. Either absolute path or relative to work-dir/network. -pid-file: hw.pid - -# Compile time configuration value. Either absolute path or relative to -# work-dir. Can only be set as environment variable. -config-file: config.yml - -# Default output size for commands such as page sizes. -output-size: 10 - -# Type of addresses to display. Example: external, internal -address-type: external - -# Displaz public keys instead of addresses in relevant commands -display-pubkeys: false - -# Use reverse paging for diplaying addresses and txs when set to True. -reverse-paging: false - -# Sign new and imported transactions. -sign-transactions: true - -# Default fee to pay (in satoshi) for every 1000 bytes. -transaction-fee: 10000 - -# Minimum number of confirmations for spending coins and displaying balances. -minimum-confirmations: 0 - -# Display the balance including offline transactions -offline: false - -# Entropy in bytes to use when generating a mnemonic (between 16 and 32) -seed-entropy: 16 - -# How command-line output should be displayed. Supported values are: -# normal, json or yaml. -display-format: normal - -# Detach the SPV server from the terminal when launched -detach-server: false - -# Use Testnet3 -use-testnet: false - -# Haskoin working directory. Either absolute path or relative to user’s home. -# Defaults to an appropriate OS-specific value. -work-dir: "" - -# Log level. Valid values are debug, info, warn and error. -log-level: info - -# Print verbose -verbose: false - -# Recipient pays transaction fee. DANGEROUS. -recipient-fee: false - -# Server key for authentication and encryption (server config). -server-key: -# Server public key for authentication and encryption (client config). -server-key-public: - -# Client key for authentication and encryption (client config). -client-key: -# Client public key for aunthentication and encryption (client + server config). 
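The wallet reads this file back with Data.Yaml; Client.hs further down calls decodeFileEither on it and then lets command-line flags override the decoded values. A minimal sketch of that decoding step, assuming only the aeson/yaml API; the PartialConfig record and its Haskell-side field names are illustrative and not types from the package:

{-# LANGUAGE OverloadedStrings #-}
module Main where

import Data.Aeson (FromJSON (..), withObject, (.:))
import Data.Word  (Word32, Word64)
import Data.Yaml  (decodeFileEither)

-- Illustrative subset of the settings documented above; not a package type.
data PartialConfig = PartialConfig
    { pcFee     :: Word64   -- transaction-fee, satoshi per 1000 bytes
    , pcMinConf :: Word32   -- minimum-confirmations
    , pcConnect :: String   -- connect-uri, ZeroMQ endpoint of the server
    } deriving Show

instance FromJSON PartialConfig where
    parseJSON = withObject "config" $ \o ->
        PartialConfig
            <$> o .: "transaction-fee"
            <*> o .: "minimum-confirmations"
            <*> o .: "connect-uri"

main :: IO ()
main = do
    res <- decodeFileEither "config.yml"
    case res of
        Left err  -> putStrLn ("could not parse config: " ++ show err)
        Right cfg -> print (cfg :: PartialConfig)

Because withObject only looks up the keys it is asked for, a partial record like this still decodes the full file; the package's own Config type in Network.Haskoin.Wallet.Settings plays this role for the complete set of keys.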
-client-key-public: diff --git a/haskoin-wallet/config/help b/haskoin-wallet/config/help deleted file mode 100644 index bb100864..00000000 --- a/haskoin-wallet/config/help +++ /dev/null @@ -1,53 +0,0 @@ -Server commands: - start [--detach] Start the haskoin daemon - stop Stop the haskoin daemon - status [--verbose] Display node runtime information - -Account commands: - newacc name Create a new account - newread name Create a new read-only account - newms name M N Create a new multisig account - newreadms name M N Create a new read-only ms account - addkey acc Add pubkeys to a multisig account - setgap acc gap Set the address gap for an account - accounts [page] [-c pagesize] [-r] List all accounts in a keyring - account acc Display an account by name - rename old new Rename account - -Address commands: - list acc [page] [-c pagesize] [-r] Display addresses by page - unused acc [page] [-c pagesize] [-r] Display unused addresses - label acc index label Set the label of an address - addrtxs acc index [page] [-c pagesize] Display txs related to an address - getindex acc key Get key index by pubkey - genaddrs acc index [--internal] Generate addresses up to this index - uri acc index [amount] [msg] Create a Bitcoin payment request URI - -Transaction commands: - txs acc [page] [-c pagesize] [-r] Display all transactions by page - pending acc [page] [-c pagesize] [-r] Display pending transactions by page - dead acc [page] [-c pagesize] [-r] Display dead transactions by page - send acc addr amount [-S] Send coins to an address - sendmany acc {addr:amount...} [-S] Send coins to many addresses - import acc Import a transaction (does not sign) - sign acc txid Sign one of your offline transactions - balance acc [--minconf] [--offline] Display account balance - gettx acc txid Get a transaction by txid - deletetx txid Delete unconfirmed transaction - sync acc {hash|height} [page] Get blocks following specified one - -Offline tx signing commands: - getoffline acc txhash Get data to sign a tx offline - signoffline acc Sign a tx with offline signing data - -Utility commands: - decodetx Decode HEX transaction - rescan [timestamp] Rescan the wallet - keypair Get curve key pair for ØMQ auth - monitor [account] Monitor events (no account: blocks) - blockinfo [hash1] [hash2] [...] 
Get block header information by hash - dice dicerolls Mnemonic from 99 dice rolls (1 to 6) - -Other commands: - version Display version information - help Display this help information diff --git a/haskoin-wallet/config/models b/haskoin-wallet/config/models deleted file mode 100644 index d3aaa411..00000000 --- a/haskoin-wallet/config/models +++ /dev/null @@ -1,83 +0,0 @@ -Account - name Text maxlen=200 - type AccountType maxlen=64 - master XPrvKey Maybe - keys [XPubKey] - gap Word32 - created UTCTime - UniqueAccount name - deriving Show - -WalletAddr - account AccountId - address Address maxlen=64 - index KeyIndex - type AddressType maxlen=16 - label Text - redeem ScriptOutput Maybe - key PubKeyC Maybe maxlen=66 - created UTCTime - UniqueAddr account address - UniqueAddrRev address account - UniqueAddrIndex account type index - deriving Show - -WalletTx - account AccountId - hash TxHash maxlen=64 - nosigHash TxHash maxlen=64 - type TxType maxlen=16 - inValue Word64 - outValue Word64 - inputs [AddressInfo] - outputs [AddressInfo] - change [AddressInfo] - tx Tx - isCoinbase Bool - confidence TxConfidence maxlen=16 - confirmedBy BlockHash Maybe maxlen=64 - confirmedHeight BlockHeight Maybe - confirmedDate Timestamp Maybe - created UTCTime - UniqueAccTx account hash - UniqueAccTxRev hash account - UniqueAccNoSig account nosigHash - UniqueAccNoSigRev nosigHash account - deriving Show - -WalletCoin - account AccountId - hash TxHash maxlen=64 - pos Word32 - tx WalletTxId - addr WalletAddrId - value Word64 - script ScriptOutput - created UTCTime - UniqueCoin account hash pos - UniqueCoinRev hash pos account - UniqueCoinTx tx pos - UniqueCoinTxRev pos tx - deriving Show - -SpentCoin - account AccountId - hash TxHash maxlen=64 - pos Word32 - spendingTx WalletTxId - created UTCTime - UniqueSpentCoins account hash pos - UniqueSpentCoinsRev hash pos account - UniqueSpentTx spendingTx hash pos - UniqueSpentTxRev hash pos spendingTx - deriving Show - -WalletState - height BlockHeight - block BlockHash maxlen=64 - bloomFilter BloomFilter - bloomElems Int - bloomFp Double - version Int - created UTCTime - deriving Show diff --git a/haskoin-wallet/examples/embedded-inproc-wallet-server/Main.hs b/haskoin-wallet/examples/embedded-inproc-wallet-server/Main.hs deleted file mode 100644 index 77797d39..00000000 --- a/haskoin-wallet/examples/embedded-inproc-wallet-server/Main.hs +++ /dev/null @@ -1,187 +0,0 @@ -module Main where - -import Network.Haskoin.Wallet (Config(..), - WalletRequest(..), WalletResponse(..), - AddressType(..), OutputFormat(..), - SPVMode(..), NodeAction(..)) - -import Network.Haskoin.Wallet.Server (runSPVServerWithContext) -import Network.Haskoin.Wallet.Internals (BTCNode(..), Notif(..)) -import qualified Network.Haskoin.Node.STM as Node - -import Data.String.Conversions (cs) -import qualified System.ZMQ4 as ZMQ -import qualified Control.Monad.Logger as Log -import qualified Data.HashMap.Strict as HM -import qualified Database.Persist.Sqlite as DB -import qualified Control.Monad.Trans.Resource as Resource -import qualified Data.Aeson as JSON -import qualified Control.Concurrent as Con -import qualified Data.Aeson.Encode.Pretty as PrettyJSON -import qualified Control.Monad as M -import qualified Control.Exception as Except - - -databaseConf :: DB.SqliteConf -databaseConf = DB.SqliteConf "/tmp/tmpdb" 1 - -cmdSocket :: String -cmdSocket = "inproc://cmd" - -notifSocket :: String -notifSocket = "inproc://notif" - - --- |Simple example app that embeds a haskoin-wallet server. 
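The config/models blocks a little further up are Persistent's quasi-quoted schema syntax: persistent-template turns each block into a record type plus the listed Unique constraints, and the wallet then queries the generated types through persistent and esqueleto. A minimal, self-contained sketch of that pipeline with a cut-down DemoAddr entity; the entity, its fields and the SQLite file name are illustrative only, not the wallet's real schema:

{-# LANGUAGE EmptyDataDecls, FlexibleContexts, GADTs, GeneralizedNewtypeDeriving,
             MultiParamTypeClasses, OverloadedStrings, QuasiQuotes,
             TemplateHaskell, TypeFamilies #-}
module Main where

import Control.Monad.IO.Class  (liftIO)
import Data.Text               (Text)
import Database.Persist
import Database.Persist.Sqlite
import Database.Persist.TH

-- Cut-down stand-in for the WalletAddr entity defined in config/models.
share [mkPersist sqlSettings, mkMigrate "migrateAll"] [persistLowerCase|
DemoAddr
    address Text maxlen=64
    index   Int
    label   Text
    UniqueDemoAddr address
    deriving Show
|]

main :: IO ()
main = runSqlite "demo.sqlite3" $ do
    runMigration migrateAll
    _ <- insert $ DemoAddr "example-address" 0 "example"
    addrs <- selectList [DemoAddrIndex ==. 0] [LimitTo 10]
    liftIO $ mapM_ (print . entityVal) addrs

The WalletAddr block above works the same way; its richer column types (Address, KeyIndex, ScriptOutput) rely on PersistField instances that the package defines elsewhere.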
--- Start wallet server + notification thread, and execute Status command when pressing ENTER -main :: IO () -main = ZMQ.withContext $ \ctx -> do - -- Server - putStrLn "Starting server..." - _ <- Con.forkIO $ runWallet walletServerConf ctx - -- Notify thread - putStrLn "Starting notification thread..." - _ <- Con.forkIO $ notifyThread ctx notifyHandler - -- Status loop - M.forever $ do - putStrLn "Press ENTER to get server status..." - _ <- getLine - cmdGetStatus ctx >>= printStatusJSON - where - printStatusJSON = putStrLn . cs . PrettyJSON.encodePretty - notifyHandler notif = - putStrLn $ "NOTIFY: New block: " ++ cs (PrettyJSON.encodePretty notif) - --- |Run haskoin-wallet using the specified ZeroMQ Context, --- and log to stderr. -runWallet :: Config -> ZMQ.Context -> IO () -runWallet cfg ctx = run $ runSPVServerWithContext cfg ctx - where run = Resource.runResourceT . runLogging - runLogging = Log.runStderrLoggingT . Log.filterLogger logFilter - logFilter _ l = l >= configLogLevel cfg - -cmdGetStatus :: ZMQ.Context -> IO Node.NodeStatus -cmdGetStatus ctx = - sendCmdOrFail (NodeActionReq NodeActionStatus) ctx >>= - \res -> case res of - Nothing -> error "ERROR: Status command: no response." - Just status -> return status - -sendCmdOrFail :: (JSON.FromJSON a, JSON.ToJSON a) - => WalletRequest - -> ZMQ.Context - -> IO (Maybe a) -sendCmdOrFail cmd ctx = - sendCmd cmd ctx >>= - either error return >>= - \res -> case res of - ResponseError e -> error $ "ERROR: Send cmd, ResponseError: " ++ cs e - ResponseValid r -> return r - -sendCmd :: (JSON.FromJSON a, JSON.ToJSON a) - => WalletRequest - -> ZMQ.Context - -> IO (Either String (WalletResponse a)) -sendCmd req ctx = - ZMQ.withSocket ctx ZMQ.Req $ \sock -> do - ZMQ.setLinger (ZMQ.restrict (0 :: Int)) sock - ZMQ.connect sock cmdSocket - ZMQ.send sock [] (cs $ JSON.encode req) - JSON.eitherDecode . cs <$> ZMQ.receive sock - --- |Connect to notify socket, subscribe to new blocks, --- and execute the supplied handler for each new block as it arrives. -notifyThread :: ZMQ.Context -> (Notif -> IO ()) -> IO () -notifyThread ctx handler = waitAndCatch $ - ZMQ.withSocket ctx ZMQ.Sub $ \sock -> do - ZMQ.setLinger (ZMQ.restrict (0 :: Int)) sock - ZMQ.connect sock notifSocket - ZMQ.subscribe sock "[block]" - putStrLn "NOTIFY: Connected. Subscribed to new blocks." - M.forever $ do - [_,m] <- ZMQ.receiveMulti sock - notif <- either failOnErr return $ JSON.eitherDecode (cs m) - handler notif - where - failOnErr = fail . 
("NOTIFY: ERROR: recv failed: " ++) - waitAndCatch ioa = Con.threadDelay 10000 >> ioa `Except.finally` waitAndCatch ioa - -btcNodes :: [BTCNode] -btcNodes = - [ BTCNode "dnsseed.bluematt.me" 8333 - , BTCNode "dnsseed.bitcoin.dashjr.org" 8333 - , BTCNode "dnsseed.bluematt.me" 8333 - , BTCNode "seed.bitcoinstats.com" 8333 - , BTCNode "seed.bitcoin.jonasschnelli.ch" 8333 - , BTCNode "seed.bitcoin.sipa.be" 8333 - , BTCNode "seed.bitnodes.io" 8333 - , BTCNode "seed.btcc.com" 8333 - ] - -walletServerConf :: Config -walletServerConf = Config - { configCount = 100 - -- ^ Output size of commands - , configMinConf = 6 - -- ^ Minimum number of confirmations - , configSignTx = True - -- ^ Sign transactions - , configFee = 50000 - -- ^ Fee to pay per 1000 bytes when creating new transactions - , configRcptFee = False - -- ^ Recipient pays fee (dangerous, no config file setting) - , configAddrType = AddressExternal - -- ^ Return internal instead of external addresses - , configDisplayPubKeys = False - -- ^ Display public keys instead of addresses - , configOffline = False - -- ^ Display the balance including offline transactions - , configEntropy = 16 - -- ^ Entropy in bytes to use when generating a mnemonic (between 16 and 32) - , configReversePaging = False - -- ^ Use reverse paging for displaying addresses and transactions - , configDerivIndex = 0 - -- ^ Derivation path when creating account - , configFormat = OutputNormal - -- ^ How to format the command-line results - , configConnect = cmdSocket - -- ^ ZeroMQ socket to connect to (location of the server) - , configConnectNotif = notifSocket - -- ^ ZeroMQ socket to connect for notifications - , configDetach = False - -- ^ Detach server when launched from command-line - , configFile = "" - -- ^ Configuration file - , configTestnet = False - -- ^ Use Testnet3 network - , configDir = "" - -- ^ Working directory - , configBind = cmdSocket - -- ^ Bind address for the ZeroMQ socket - , configBindNotif = notifSocket - -- ^ Bind address for ZeroMQ notifications - , configBTCNodes = HM.fromList [ ( "prodnet", btcNodes ) ] - -- ^ Trusted Bitcoin full nodes to connect to - , configMode = SPVOnline - -- ^ Operation mode of the SPV node. - , configBloomFP = 0.00001 - -- ^ False positive rate for the bloom filter. - , configDatabase = HM.fromList [ ( "prodnet", databaseConf ) ] - -- ^ Database configuration - , configLogFile = "" - -- ^ Log file - , configPidFile = "" - -- ^ PID File - , configLogLevel = Log.LevelInfo - -- ^ Log level - , configVerbose = True - -- ^ Verbose - , configServerKey = Nothing - -- ^ Server key for authentication and encryption (server config) - , configServerKeyPub = Nothing - -- ^ Server public key for authentication and encryption (client config) - , configClientKey = Nothing - -- ^ Client key for authentication and encryption (client config) - , configClientKeyPub = Nothing - -- ^ Client public key for authentication and encryption - } diff --git a/haskoin-wallet/haskoin-wallet.cabal b/haskoin-wallet/haskoin-wallet.cabal deleted file mode 100644 index f60a760b..00000000 --- a/haskoin-wallet/haskoin-wallet.cabal +++ /dev/null @@ -1,184 +0,0 @@ -name: haskoin-wallet -version: 0.4.3 -synopsis: - Implementation of a Bitcoin SPV Wallet with BIP32 and multisig support. -description: - This package provides a SPV (simple payment verification) wallet - implementation. It features BIP32 key management, deterministic signatures - (RFC-6979) and first order support for multi-signature transactions. 
You - can communicate with the wallet process through a ZeroMQ API or through a - command-line tool called "hw" which is also provided in this package. -homepage: http://github.com/haskoin/haskoin -bug-reports: http://github.com/haskoin/haskoin/issues -tested-with: GHC==8.0.2 -license: PublicDomain -license-file: UNLICENSE -author: Philippe Laprade, Jean-Pierre Rupp -maintainer: xenog@protonmail.com -category: Bitcoin, Finance, Network -build-type: Simple -cabal-version: >= 1.9.2 -extra-source-files: config/help, config/config.yml, config/models - -source-repository head - type: git - location: git://github.com/haskoin/haskoin.git - -Flag library-only - Description: Do not build the executables - Default: False - -library - hs-source-dirs: src - exposed-modules: Network.Haskoin.Wallet - Network.Haskoin.Wallet.Model - Network.Haskoin.Wallet.Client - Network.Haskoin.Wallet.Server - Network.Haskoin.Wallet.Settings - Network.Haskoin.Wallet.Internals - - other-modules: Network.Haskoin.Wallet.Types - Network.Haskoin.Wallet.Types.BlockInfo - Network.Haskoin.Wallet.Accounts - Network.Haskoin.Wallet.Transaction - Network.Haskoin.Wallet.Block - Network.Haskoin.Wallet.Server.Handler - Network.Haskoin.Wallet.Client.Commands - Network.Haskoin.Wallet.Client.PrettyJson - Network.Haskoin.Wallet.Database - - extensions: TemplateHaskell - QuasiQuotes - OverloadedStrings - MultiParamTypeClasses - TypeFamilies - GADTs - FlexibleContexts - FlexibleInstances - EmptyDataDecls - DeriveDataTypeable - RecordWildCards - GeneralizedNewtypeDeriving - - build-depends: aeson >= 1.2 && < 1.3 - , aeson-pretty >= 0.7 && < 0.9 - , base >= 4.8 && < 5 - , base64-bytestring >= 1.0.0.1 - , bytestring >= 0.10 && < 0.11 - , cereal >= 0.5 && < 0.6 - , containers >= 0.5 && < 0.6 - , conduit >= 1.2 && < 1.3 - , deepseq >= 1.4 && < 1.5 - , data-default >= 0.5 && < 0.8 - , directory >= 1.2 && < 1.4 - , daemons >= 0.2 && < 0.3 - , entropy >= 0.3.7 && < 0.4 - , exceptions >= 0.6 && < 0.9 - , esqueleto >= 2.4 && < 2.6 - , file-embed >= 0.0 && < 0.1 - , filepath >= 1.4 && < 1.5 - , haskeline - , haskoin-core >= 0.3 && < 0.5 - , haskoin-node >= 0.3 && < 0.5 - , lifted-async >= 0.2 && < 0.10 - , lifted-base >= 0.2 && < 0.3 - , monad-logger >= 0.3.13 && < 0.4 - , monad-control >= 1.0 && < 1.1 - , mtl >= 2.1 && < 2.3 - , persistent >= 2.7 && < 2.8 - , persistent-template >= 2.1 && < 2.6 - , persistent-sqlite >= 2.2 && < 2.7 - , resourcet >= 1.1 && < 1.2 - , semigroups - , split >= 0.2 && < 0.3 - , stm >= 2.4 && < 2.5 - , stm-chans >= 3.0 && < 3.1 - , stm-conduit >= 2.6 && < 3.1 - , string-conversions >= 0.4 && < 0.5 - , text >= 0.11 && < 1.3 - , time >= 1.8 && < 1.9 - , transformers-base >= 0.4 && < 0.5 - , unix >= 2.6 && < 2.8 - , unordered-containers >= 0.2 && < 0.3 - , uri-encode >= 1.5.0.5 - , yaml >= 0.8 && < 0.9 - , zeromq4-haskell >= 0.7 && < 0.8 - - ghc-options: -Wall - -executable hw - if flag(library-only) - Buildable: False - main-is: Main.hs - build-depends: base, haskoin-wallet - hs-source-dirs: app - ghc-options: -Wall - -O3 - -threaded - -eventlog - -rtsopts - -with-rtsopts=-N4 - -test-suite test-haskoin-wallet - type: exitcode-stdio-1.0 - main-is: Main.hs - - other-modules: Network.Haskoin.Wallet.Arbitrary - , Network.Haskoin.Wallet.Tests - , Network.Haskoin.Wallet.Units - - extensions: RecordWildCards - OverloadedStrings - - build-depends: aeson >= 1.2 && < 1.3 - , base >= 4.8 && < 5 - , bytestring >= 0.10 && < 0.11 - , containers >= 0.5 && < 0.6 - , directory >= 1.2 && < 1.4 - , haskoin-core >= 0.3 && < 0.5 - , haskoin-node 
>= 0.3 && < 0.5 - , haskoin-wallet - , monad-logger >= 0.3 && < 0.4 - , mtl >= 2.1 && < 2.3 - , persistent >= 2.7 && < 2.8 - , persistent-sqlite >= 2.2 && < 2.7 - , resourcet >= 1.1 && < 1.2 - , text >= 0.11 && < 1.3 - , unordered-containers >= 0.2 && < 0.3 - , HUnit >= 1.6 && < 1.7 - , QuickCheck >= 2.10 && < 2.11 - , stm >= 2.4 && < 2.5 - , stm-chans >= 3.0 && < 3.1 - , string-conversions >= 0.4 && < 0.5 - , test-framework >= 0.8 && < 0.9 - , test-framework-quickcheck2 >= 0.3 && < 0.4 - , test-framework-hunit >= 0.3 && < 0.4 - - hs-source-dirs: test - ghc-options: -Wall - -executable example-inproc-wallet-server - if flag(library-only) - Buildable: False - - main-is: Main.hs - hs-source-dirs: examples/embedded-inproc-wallet-server/ - extensions: OverloadedStrings - - ghc-options: -Wall - -O3 - -threaded - -rtsopts - -with-rtsopts=-N4 - - build-depends: base >= 4.8 && < 5 - , aeson >= 1.2 && < 1.3 - , aeson-pretty >= 0.7 && < 0.9 - , haskoin-node >= 0.3 && < 0.5 - , haskoin-wallet - , monad-logger >= 0.3 && < 0.4 - , persistent-sqlite >= 2.2 && < 2.7 - , resourcet >= 1.1 && < 1.2 - , unordered-containers >= 0.2 && < 0.3 - , string-conversions >= 0.4 && < 0.5 - , zeromq4-haskell >= 0.7 && < 0.8 diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet.hs b/haskoin-wallet/src/Network/Haskoin/Wallet.hs deleted file mode 100644 index 76df6fed..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet.hs +++ /dev/null @@ -1,110 +0,0 @@ -{-| - This package provides a command line application called /hw/ (haskoin - wallet). It is a lightweight bitcoin wallet featuring BIP32 key management, - deterministic signatures (RFC-6979) and first order support for - multisignature transactions. A library API for /hw/ is also exposed. --} -module Network.Haskoin.Wallet -( --- *Client - clientMain -, OutputFormat(..) -, Config(..) - --- *Server -, runSPVServer -, SPVMode(..) - --- *API JSON Types -, JsonAccount(..) -, JsonAddr(..) -, JsonCoin(..) -, JsonTx(..) - --- *API Request Types -, WalletRequest(..) -, ListRequest(..) -, NewAccount(..) -, OfflineTxData(..) -, CoinSignData(..) -, CreateTx(..) -, SignTx(..) -, NodeAction(..) -, AccountType(..) -, AddressType(..) -, addrTypeIndex -, TxType(..) -, TxConfidence(..) -, AddressInfo(..) -, BalanceInfo(..) - --- *API Response Types -, WalletResponse(..) -, TxCompleteRes(..) -, ListResult(..) -, RescanRes(..) 
- --- *Database Accounts -, initWallet -, accounts -, newAccount -, addAccountKeys -, getAccount -, isMultisigAccount -, isReadAccount -, isCompleteAccount -, defaultDeriv -, rootToAccKey -, rootToAccKeys - --- *Database Addresses -, getAddress -, addressesAll -, addresses -, addressList -, unusedAddresses -, addressCount -, setAddrLabel -, addressPrvKey -, useAddress -, setAccountGap -, firstAddrTime -, getPathRedeem -, getPathPubKey - --- *Database Bloom Filter -, getBloomFilter - --- *Database transactions -, txs -, addrTxs -, getTx -, getAccountTx -, importTx -, importNetTx -, signAccountTx -, createWalletTx -, signOfflineTx -, getOfflineTxData -, isCoinbaseTx - --- *Database blocks -, importMerkles -, walletBestBlock - --- *Database coins and balances -, spendableCoins -, accountBalance -, addressBalances - --- *Rescan -, resetRescan -) where - -import Network.Haskoin.Wallet.Client -import Network.Haskoin.Wallet.Server -import Network.Haskoin.Wallet.Settings -import Network.Haskoin.Wallet.Types -import Network.Haskoin.Wallet.Accounts -import Network.Haskoin.Wallet.Transaction - diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Accounts.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Accounts.hs deleted file mode 100644 index 8aeb4135..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Accounts.hs +++ /dev/null @@ -1,741 +0,0 @@ -module Network.Haskoin.Wallet.Accounts -( --- *Database Wallet - initWallet - --- *Database Accounts -, accounts -, newAccount -, renameAccount -, addAccountKeys -, getAccount -, isMultisigAccount -, isReadAccount -, isCompleteAccount - --- *Database Addresses -, getAddress -, addressesAll -, addresses -, addressList -, unusedAddresses -, lookupByPubKey -, addressCount -, setAddrLabel -, addressPrvKey -, useAddress -, generateAddrs -, setAccountGap -, firstAddrTime -, getPathRedeem -, getPathPubKey - --- *Database Bloom Filter -, getBloomFilter - --- * Helpers -, defaultDeriv -, rootToAccKey -, rootToAccKeys -, subSelectAddrCount -) where - -import Control.Applicative ((<|>)) -import Control.Exception (throw) -import Control.Monad (unless, void, when) -import Control.Monad.Base (MonadBase) -import Control.Monad.Catch (MonadThrow, throwM) -import Control.Monad.Trans (MonadIO, liftIO) -import Control.Monad.Trans.Resource (MonadResource) - -import Data.Default (def) -import Data.List (nub) -import Data.Maybe (fromMaybe, isJust, isNothing, - listToMaybe, mapMaybe, - maybeToList) -import Data.Serialize (encode) -import Data.String.Conversions (cs) -import Data.Text (Text, unpack) -import Data.Time.Clock (getCurrentTime) -import Data.Time.Clock.POSIX (utcTimeToPOSIXSeconds) -import Data.Word (Word32, Word8) - -import Database.Esqueleto (Entity (..), SqlExpr, - SqlPersistT, Value (..), asc, - case_, count, countDistinct, - countRows, desc, else_, from, - get, insertUnique, insert_, - limit, max_, offset, orderBy, - select, sub_select, then_, - unValue, val, when_, where_, - (&&.), (-.), (<.), (==.), - (>.), (^.)) -import qualified Database.Persist as P (update, updateWhere, - (=.)) - -import Network.Haskoin.Block -import Network.Haskoin.Constants -import Network.Haskoin.Crypto -import Network.Haskoin.Network -import Network.Haskoin.Script - -import Network.Haskoin.Wallet.Model -import Network.Haskoin.Wallet.Settings -import Network.Haskoin.Wallet.Types - -{- Initialization -} - -initWallet :: MonadIO m => Double -> SqlPersistT m () -initWallet fpRate = do - prevConfigRes <- select $ from $ \c -> return $ count $ c ^. 
WalletStateId - let cnt = maybe 0 unValue $ listToMaybe prevConfigRes - when (cnt == (0 :: Int)) $ do - time <- liftIO getCurrentTime - -- Create an initial bloom filter - -- TODO: Compute a random nonce - let bloom = bloomCreate (filterLen 0) fpRate 0 BloomUpdateNone - insert_ WalletState - { walletStateHeight = 0 - , walletStateBlock = headerHash genesisHeader - , walletStateBloomFilter = bloom - , walletStateBloomElems = 0 - , walletStateBloomFp = fpRate - , walletStateVersion = 1 - , walletStateCreated = time - } - -{- Account -} - -defaultDeriv :: KeyIndex -> HardPath -defaultDeriv a = Deriv :| 44 :| bip44Coin :| a - -rootToAccKey :: XPrvKey -> KeyIndex -> XPrvKey -rootToAccKey xKey deriv - -- use bip44 derivation for root keys - | xPrvDepth xKey == 0 = derivePath (defaultDeriv deriv) xKey - -- use custom keys as is - | otherwise = xKey - -rootToAccKeys :: XPrvKey -> [XPubKey] -> [XPrvKey] -rootToAccKeys xKey pubs = - nub $ map (rootToAccKey xKey) is - where - is = nub $ map xPubChild pubs - --- | Fetch all accounts -accounts :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => ListRequest -> SqlPersistT m ([Account], Word32) -accounts ListRequest{..} = do - cntRes <- select $ from $ \acc -> - return $ countDistinct $ acc ^. AccountId - - let cnt = maybe 0 unValue $ listToMaybe cntRes - - when (listOffset > 0 && listOffset >= cnt) $ throw $ WalletException - "Offset beyond end of data set" - - res <- fmap (map entityVal) $ select $ from $ \acc -> do - limitOffset listLimit listOffset - return acc - - return (res, cnt) - -initGap :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Entity Account -> SqlPersistT m () -initGap accE = do - void $ createAddrs accE AddressExternal 20 - void $ createAddrs accE AddressInternal 20 - -genMnemonic :: (MonadThrow m, MonadIO m) => Word8 -> m Mnemonic -genMnemonic bytes - | bytes `elem` [16,20..32] = do - entropy <- liftIO $ getEntropy $ fromIntegral bytes - case toMnemonic entropy of - Right ms -> return ms - Left err -> throwM $ WalletException err - | otherwise = throwM $ - WalletException "Entropy can only be 16, 20, 24, 28 or 32 bytes" - -mnemonicToPrvKey :: MonadThrow m => Passphrase -> Mnemonic -> m XPrvKey -mnemonicToPrvKey pass ms = case mnemonicToSeed pass ms of - Right seed -> return $ makeXPrvKey seed - Left err -> throwM $ WalletException err - --- |Create a new account using the given mnemonic or keys. If no mnemonic, --- master key or public keys are provided, a new mnemonic will be generated. 
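genMnemonic and mnemonicToPrvKey above are the only steps between raw entropy and a root key, and rootToAccKey then applies defaultDeriv to reach the BIP44 account level. A minimal sketch of the same derivation outside the database layer, reusing the haskoin-core functions already imported by this module; the 16-byte entropy size matches the seed-entropy default, while the empty passphrase and account index 0 are just example choices:

{-# LANGUAGE OverloadedStrings #-}
module Main where

import Network.Haskoin.Crypto
import System.Entropy (getEntropy)

-- Entropy -> mnemonic -> seed -> root key -> BIP44 account key (m/44'/0'/0'),
-- mirroring genMnemonic, mnemonicToPrvKey and rootToAccKey above.
main :: IO ()
main = do
    ent <- getEntropy 16                       -- 16 bytes of entropy, 12-word mnemonic
    case toMnemonic ent of
        Left err -> putStrLn $ "toMnemonic failed: " ++ err
        Right ms -> case mnemonicToSeed "" ms of
            Left err   -> putStrLn $ "mnemonicToSeed failed: " ++ err
            Right seed -> do
                let root = makeXPrvKey seed
                    path = Deriv :| 44 :| 0 :| 0 :: HardPath  -- same shape as defaultDeriv 0
                    acc  = derivePath path root
                print ms                       -- back it up; it is never stored
                print (deriveXPubKey acc)      -- account public key

newAccount below does the same thing behind the scenes when it is given neither a mnemonic nor keys, and returns the generated mnemonic exactly once so it can be displayed.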
-newAccount :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => NewAccount - -> SqlPersistT m (Entity Account, Maybe Mnemonic) -newAccount NewAccount{..} = do - unless (validAccountType newAccountType) $ - throwM $ WalletException "Invalid account type" - - when (isJust newAccountMnemonic && isJust newAccountMaster) $ throwM $ - WalletException "Can not set both master key and mnemonic" - - let pass = cs $ fromMaybe "" newAccountPassword - - (mnemonicM, keyM) <- case newAccountMaster of - Just xPrv -> return (Nothing, Just xPrv) - _ -> case newAccountMnemonic of - Just ms -> do - root <- mnemonicToPrvKey pass $ cs ms - -- Don't return our own mnemonic as we don't want it displayed - return (Nothing, Just root) - _ -> if null newAccountKeys - then do - -- Generate a key if nothing was provided - let defEnt = configEntropy def - ms <- genMnemonic $ fromMaybe defEnt newAccountEntropy - root <- mnemonicToPrvKey pass ms - return (Just ms, Just root) - else return (Nothing, Nothing) - - -- TODO: If account already exists, increase the derivation index - let d = fromMaybe 0 newAccountDeriv - accKeyM = (`rootToAccKey` d) <$> keyM - accPubKeys = maybeToList (deriveXPubKey <$> accKeyM) ++ newAccountKeys - - -- Build the account - now <- liftIO getCurrentTime - let acc = Account - { accountName = newAccountName - , accountType = newAccountType - -- Never store private keys for read only accounts - , accountMaster = if newAccountReadOnly - then Nothing - else accKeyM - , accountKeys = nub accPubKeys - , accountGap = 0 - , accountCreated = now - } - - -- Check if all the keys are valid - unless (isValidAccKeys acc) $ - throwM $ WalletException "Invalid account keys" - - -- Insert our account in the database - let canSetGap = isCompleteAccount acc - newAcc = acc{ accountGap = if canSetGap then 10 else 0 } - - insertUnique newAcc >>= \resM -> case resM of - -- The account got created. - Just ai -> do - let accE = Entity ai newAcc - -- If we can set the gap, create the gap addresses - when canSetGap $ initGap accE - -- The mnemonic is returned to eventually be displayed as it is - -- not stored anywhere. - return (accE, mnemonicM) - -- The account already exists - Nothing -> throwM $ WalletException "Account already exists" - -renameAccount :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Entity Account - -> AccountName - -> SqlPersistT m Account -renameAccount (Entity ai acc) name = do - P.update ai [ AccountName P.=. name ] - return $ acc{ accountName = name } - --- | Add new thirdparty keys to a multisignature account. This function can --- fail if the multisignature account already has all required keys. -addAccountKeys :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Entity Account -- ^ Account Entity - -> [XPubKey] -- ^ Thirdparty public keys to add - -> SqlPersistT m Account -- ^ Account information -addAccountKeys (Entity ai acc) keys - -- We can only add keys on incomplete accounts - | isCompleteAccount acc = throwM $ - WalletException "The account is already complete" - | null keys || not (isValidAccKeys accKeys) = throwM $ - WalletException "Invalid account keys" - | otherwise = do - let canSetGap = isCompleteAccount accKeys - updGap = [ AccountGap P.=. 10 | canSetGap ] - newAcc = accKeys{ accountGap = if canSetGap then 10 else 0 } - -- Update the account with the keys and the new gap if it is complete - P.update ai $ (AccountKeys P.=. 
newKeys) : updGap - -- If we can set the gap, create the gap addresses - when canSetGap $ initGap $ Entity ai newAcc - return newAcc - where - newKeys = accountKeys acc ++ keys - accKeys = acc{ accountKeys = newKeys } - -isValidAccKeys :: Account -> Bool -isValidAccKeys Account{..} = testMaster && case accountType of - AccountRegular -> length accountKeys == 1 - AccountMultisig _ n -> goMultisig n - where - goMultisig n = - length accountKeys == length (nub accountKeys) && - length accountKeys <= n && not (null accountKeys) - testMaster = case accountMaster of - Just m -> deriveXPubKey m `elem` accountKeys - Nothing -> True - --- Helper functions to get an Account if it exists, or throw an exception --- otherwise. -getAccount :: (MonadIO m, MonadThrow m) => AccountName - -> SqlPersistT m (Entity Account) -getAccount accountName = do - as <- select $ from $ \a -> do - where_ $ a ^. AccountName ==. val accountName - return a - case as of - (accEnt:_) -> return accEnt - _ -> throwM $ WalletException $ unwords - [ "Account", unpack accountName, "does not exist" ] - -{- Addresses -} - --- | Get an address if it exists, or throw an exception otherwise. Fetching --- addresses in the hidden gap will also throw an exception. -getAddress :: (MonadIO m, MonadThrow m) - => Entity Account -- ^ Account Entity - -> AddressType -- ^ Address type - -> KeyIndex -- ^ Derivation index (key) - -> SqlPersistT m (Entity WalletAddr) -- ^ Address -getAddress accE@(Entity ai _) addrType index = do - res <- select $ from $ \x -> do - where_ ( x ^. WalletAddrAccount ==. val ai - &&. x ^. WalletAddrType ==. val addrType - &&. x ^. WalletAddrIndex ==. val index - &&. x ^. WalletAddrIndex <. subSelectAddrCount accE addrType - ) - limit 1 - return x - case res of - (addrE:_) -> return addrE - _ -> throwM $ WalletException $ unwords - [ "Invalid address index", show index ] - --- | All addresses in the wallet, including hidden gap addresses. This is useful --- for building a bloom filter. -addressesAll :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => SqlPersistT m [WalletAddr] -addressesAll = fmap (map entityVal) $ select $ from return - --- | All addresses in one account excluding hidden gap. -addresses :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Entity Account -- ^ Account Entity - -> AddressType -- ^ Address Type - -> SqlPersistT m [WalletAddr] -- ^ Addresses -addresses accE@(Entity ai _) addrType = fmap (map entityVal) $ - select $ from $ \x -> do - where_ ( x ^. WalletAddrAccount ==. val ai - &&. x ^. WalletAddrType ==. val addrType - &&. x ^. WalletAddrIndex <. subSelectAddrCount accE addrType - ) - return x - --- | Get address list. -addressList :: MonadIO m - => Entity Account -- ^ Account Entity - -> AddressType -- ^ Address type - -> ListRequest -- ^ List request - -> SqlPersistT m ([WalletAddr], Word32) - -- ^ List result -addressList accE@(Entity ai _) addrType ListRequest{..} = do - cnt <- addressCount accE addrType - - when (listOffset > 0 && listOffset >= cnt) $ throw $ WalletException - "Offset beyond end of data set" - - res <- fmap (map entityVal) $ select $ from $ \x -> do - where_ ( x ^. WalletAddrAccount ==. val ai - &&. x ^. WalletAddrType ==. val addrType - &&. x ^. WalletAddrIndex <. val cnt - ) - let order = if listReverse then asc else desc - orderBy [ order (x ^. 
WalletAddrIndex) ] - when (listLimit > 0) $ limit $ fromIntegral listLimit - when (listOffset > 0) $ offset $ fromIntegral listOffset - return x - - return (res, cnt) - --- | Get a count of all the addresses in an account -addressCount :: MonadIO m - => Entity Account -- ^ Account Entity - -> AddressType -- ^ Address type - -> SqlPersistT m Word32 -- ^ Address Count -addressCount (Entity ai acc) addrType = do - res <- select $ from $ \x -> do - where_ ( x ^. WalletAddrAccount ==. val ai - &&. x ^. WalletAddrType ==. val addrType - ) - return countRows - let cnt = maybe 0 unValue $ listToMaybe res - return $ if cnt > accountGap acc then cnt - accountGap acc else 0 - --- | Get a list of all unused addresses. -unusedAddresses :: MonadIO m - => Entity Account -- ^ Account ID - -> AddressType -- ^ Address type - -> ListRequest - -> SqlPersistT m ([WalletAddr], Word32) -- ^ Unused addresses -unusedAddresses (Entity ai acc) addrType ListRequest{..} = do - cntRes <- select $ from $ \x -> do - where_ ( x ^. WalletAddrAccount ==. val ai - &&. x ^. WalletAddrType ==. val addrType - ) - return countRows - - let cnt = maybe 0 unValue $ listToMaybe cntRes - - when (listOffset > 0 && listOffset >= gap) $ throw $ WalletException - "Offset beyond end of data set" - - res <- fmap (map entityVal) $ select $ from $ \x -> do - where_ ( x ^. WalletAddrAccount ==. val ai - &&. x ^. WalletAddrType ==. val addrType - ) - orderBy [ order $ x ^. WalletAddrIndex ] - limit $ fromIntegral $ lim cnt - offset $ fromIntegral $ off cnt - return x - return (res, gap) - where - gap = accountGap acc - lim' = if listLimit > 0 then listLimit else gap - off cnt | listReverse = listOffset + gap - | otherwise = cnt - 2 * gap + listOffset - lim cnt | listReverse = min lim' (gap - listOffset) - | otherwise = min lim' (cnt - off cnt - gap) - order = if listReverse then desc else asc - -lookupByPubKey :: (MonadIO m, MonadThrow m) - => Entity Account -- ^ Account Entity - -> PubKeyC -- ^ Pubkey of interest - -> AddressType -- ^ Address type - -> SqlPersistT m [WalletAddr] -lookupByPubKey (Entity ai _) key addrType = - fmap (map entityVal) $ select $ from $ \x -> do - where_ ( x ^. WalletAddrAccount ==. val ai - &&. x ^. WalletAddrType ==. val addrType - &&. x ^. WalletAddrKey ==. val (Just key) - ) - return x - --- | Add a label to an address. -setAddrLabel :: (MonadIO m, MonadThrow m) - => Entity Account -- ^ Account ID - -> KeyIndex -- ^ Derivation index - -> AddressType -- ^ Address type - -> Text -- ^ New label - -> SqlPersistT m WalletAddr -setAddrLabel accE i addrType label = do - Entity addrI addr <- getAddress accE addrType i - P.update addrI [ WalletAddrLabel P.=. label ] - return $ addr{ walletAddrLabel = label } - --- | Returns the private key of an address. -addressPrvKey :: (MonadIO m, MonadThrow m) - => Entity Account -- ^ Account Entity - -> Maybe XPrvKey -- ^ If not in account - -> KeyIndex -- ^ Derivation index of the address - -> AddressType -- ^ Address type - -> SqlPersistT m PrvKeyC -- ^ Private key -addressPrvKey accE@(Entity ai acc) masterM index addrType = do - ret <- select $ from $ \x -> do - where_ ( x ^. WalletAddrAccount ==. val ai - &&. x ^. WalletAddrType ==. val addrType - &&. x ^. WalletAddrIndex ==. val index - &&. x ^. WalletAddrIndex <. subSelectAddrCount accE addrType - ) - return $ x ^. 
WalletAddrIndex - case ret of - (Value idx:_) -> do - accKey <- case accountMaster acc <|> masterM of - Just key -> return key - Nothing -> throwM $ WalletException "Could not get private key" - let addrKey = - prvSubKey (prvSubKey accKey (addrTypeIndex addrType)) idx - return $ xPrvKey addrKey - _ -> throwM $ WalletException "Invalid address" - --- | Create new addresses in an account and increment the internal bloom filter. --- This is a low-level function that simply creates the desired amount of new --- addresses in an account, disregarding visible and hidden address gaps. You --- should use the function `setAccountGap` if you want to control the gap of an --- account instead. -createAddrs :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Entity Account - -> AddressType - -> Word32 - -> SqlPersistT m [WalletAddr] -createAddrs (Entity ai acc) addrType n - | n == 0 = throwM $ WalletException $ - unwords [ "Invalid value", show n ] - | not (isCompleteAccount acc) = - throwM $ WalletException $ unwords - [ "Keys are still missing from the incomplete account" - , unpack $ accountName acc - ] - | otherwise = do - now <- liftIO getCurrentTime - -- Find the next derivation index from the last address - lastRes <- select $ from $ \x -> do - where_ ( x ^. WalletAddrAccount ==. val ai - &&. x ^. WalletAddrType ==. val addrType - ) - return $ max_ (x ^. WalletAddrIndex) - let nextI = case lastRes of - (Value (Just lastI):_) -> lastI + 1 - _ -> 0 - build (addr, keyM, rdmM, i) = WalletAddr - { walletAddrAccount = ai - , walletAddrAddress = addr - , walletAddrIndex = i - , walletAddrType = addrType - , walletAddrLabel = "" - , walletAddrRedeem = rdmM - , walletAddrKey = keyM - , walletAddrCreated = now - } - res = map build $ take (fromIntegral n) $ deriveFrom nextI - - -- Save the addresses and increment the bloom filter - splitInsertMany_ res - incrementFilter res - return res - where - -- Branch type (external = 0, internal = 1) - branchType = addrTypeIndex addrType - deriveFrom = case accountType acc of - AccountMultisig m _ -> - let f (a, r, i) = (a, Nothing, Just r, i) - deriv = Deriv :/ branchType - in map f . derivePathMSAddrs (accountKeys acc) deriv m - AccountRegular -> case accountKeys acc of - (key:_) -> let f (a, k, i) = (a, Just k, Nothing, i) - in map f . derivePathAddrs key (Deriv :/ branchType) - [] -> throw $ WalletException $ unwords - [ "createAddrs: No key in regular account (corrupt database)" - , unpack $ accountName acc - ] - --- | Generate all the addresses up to certain index. -generateAddrs :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Entity Account - -> AddressType - -> KeyIndex - -> SqlPersistT m Int -generateAddrs accE addrType genIndex = do - cnt <- addressCount accE addrType - let toGen = fromIntegral genIndex - fromIntegral cnt + 1 - if toGen > 0 - then do - void $ createAddrs accE addrType $ fromIntegral toGen - return toGen - else return 0 - --- | Use an address and make sure we have enough gap addresses after it. --- Returns the new addresses that have been created. -useAddress :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => WalletAddr -> SqlPersistT m [WalletAddr] -useAddress WalletAddr{..} = do - res <- select $ from $ \x -> do - where_ ( x ^. WalletAddrAccount ==. val walletAddrAccount - &&. x ^. WalletAddrType ==. val walletAddrType - &&. x ^. WalletAddrIndex >. 
val walletAddrIndex - ) - return countRows - case res of - (Value cnt:_) -> get walletAddrAccount >>= \accM -> case accM of - Just acc -> do - let accE = Entity walletAddrAccount acc - gap = fromIntegral (accountGap acc) :: Int - missing = 2*gap - cnt - if missing > 0 - then createAddrs accE walletAddrType $ fromIntegral missing - else return [] - _ -> return [] -- Should not happen - _ -> return [] -- Should not happen - --- | Set the address gap of an account to a new value. This will create new --- internal and external addresses as required. The gap can only be increased, --- not decreased in size. -setAccountGap :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Entity Account -- ^ Account Entity - -> Word32 -- ^ New gap value - -> SqlPersistT m (Entity Account) -setAccountGap accE@(Entity ai acc) gap - | not (isCompleteAccount acc) = - throwM $ WalletException $ unwords - [ "Keys are still missing from the incomplete account" - , unpack $ accountName acc - ] - | missing <= 0 = throwM $ WalletException - "The gap of an account can only be increased" - | otherwise = do - _ <- createAddrs accE AddressExternal $ fromInteger $ missing*2 - _ <- createAddrs accE AddressInternal $ fromInteger $ missing*2 - P.update ai [ AccountGap P.=. gap ] - return $ Entity ai acc{ accountGap = gap } - where - missing = toInteger gap - toInteger (accountGap acc) - --- Return the creation time of the first address in the wallet. -firstAddrTime :: MonadIO m => SqlPersistT m (Maybe Timestamp) -firstAddrTime = do - res <- select $ from $ \x -> do - orderBy [ asc (x ^. WalletAddrId) ] - limit 1 - return $ x ^. WalletAddrCreated - return $ case res of - (Value d:_) -> Just $ toPOSIX d - _ -> Nothing - where - toPOSIX = fromInteger . round . utcTimeToPOSIXSeconds - -{- Bloom filters -} - --- | Add the given addresses to the bloom filter. If the number of elements --- becomes too large, a new bloom filter is computed from scratch. -incrementFilter :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => [WalletAddr] - -> SqlPersistT m () -incrementFilter addrs = do - (bloom, elems, _) <- getBloomFilter - let newElems = elems + (length addrs * 2) - if filterLen newElems > filterLen elems - then computeNewFilter - else setBloomFilter (addToFilter bloom addrs) newElems - --- | Generate a new bloom filter from the data in the database -computeNewFilter :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => SqlPersistT m () -computeNewFilter = do - (_, _, fpRate) <- getBloomFilter - -- Create a new empty bloom filter - -- TODO: Choose a random nonce for the bloom filter - -- TODO: Check global bloom filter length limits - cntRes <- select $ from $ \x -> return $ count $ x ^. WalletAddrId - let elems = maybe 0 unValue $ listToMaybe cntRes - newBloom = bloomCreate (filterLen elems) fpRate 0 BloomUpdateNone - addrs <- addressesAll - let bloom = addToFilter newBloom addrs - setBloomFilter bloom elems - --- Compute the size of a filter given a number of elements. Scale --- the filter length by powers of 2. -filterLen :: Int -> Int -filterLen = round . pow2 . ceiling . 
log2 - where - pow2 x = (2 :: Double) ** fromInteger x - log2 x = logBase (2 :: Double) (fromIntegral x) - --- | Add elements to a bloom filter -addToFilter :: BloomFilter -> [WalletAddr] -> BloomFilter -addToFilter bloom addrs = - bloom3 - where - pks = mapMaybe walletAddrKey addrs - rdms = mapMaybe walletAddrRedeem addrs - -- Add the Hash160 of the addresses - f1 b a = bloomInsert b $ encode $ getAddrHash a - bloom1 = foldl f1 bloom $ map walletAddrAddress addrs - -- Add the redeem scripts - f2 b r = bloomInsert b $ encodeOutputBS r - bloom2 = foldl f2 bloom1 rdms - -- Add the public keys - f3 b p = bloomInsert b $ encode p - bloom3 = foldl f3 bloom2 pks - --- | Returns a bloom filter containing all the addresses in this wallet. This --- includes internal and external addresses. The bloom filter can be set on a --- peer connection to filter the transactions received by that peer. -getBloomFilter :: (MonadIO m, MonadThrow m) - => SqlPersistT m (BloomFilter, Int, Double) -getBloomFilter = do - res <- select $ from $ \c -> do - limit 1 - return ( c ^. WalletStateBloomFilter - , c ^. WalletStateBloomElems - , c ^. WalletStateBloomFp - ) - case res of - ((Value b, Value n, Value fp):_) -> return (b, n, fp) - _ -> throwM $ - WalletException "getBloomFilter: Database not initialized" - --- | Save a bloom filter and the number of elements it contains -setBloomFilter :: MonadIO m => BloomFilter -> Int -> SqlPersistT m () -setBloomFilter bloom elems = - P.updateWhere [] [ WalletStateBloomFilter P.=. bloom - , WalletStateBloomElems P.=. elems - ] - --- Helper function to compute the redeem script of a given derivation path --- for a given multisig account. -getPathRedeem :: Account -> SoftPath -> RedeemScript -getPathRedeem acc@Account{..} deriv = case accountType of - AccountMultisig m _ -> if isCompleteAccount acc - then sortMulSig $ PayMulSig pubKeys m - else throw $ WalletException $ unwords - [ "getPathRedeem: Incomplete multisig account" - , unpack accountName - ] - _ -> throw $ WalletException $ unwords - [ "getPathRedeem: Account", unpack accountName - , "is not a multisig account" - ] - where - f = toPubKeyG . xPubKey . derivePubPath deriv - pubKeys = map f accountKeys - --- Helper function to compute the public key of a given derivation path for --- a given non-multisig account. -getPathPubKey :: Account -> SoftPath -> PubKeyC -getPathPubKey acc@Account{..} deriv - | isMultisigAccount acc = throw $ WalletException $ - unwords [ "getPathPubKey: Account", unpack accountName - , "is not a regular non-multisig account" - ] - | otherwise = case accountKeys of - (key:_) -> xPubKey $ derivePubPath deriv key - _ -> throw $ WalletException $ unwords - [ "getPathPubKey: No keys are available in account" - , unpack accountName - ] - -{- Helpers -} - -subSelectAddrCount :: Entity Account - -> AddressType - -> SqlExpr (Value KeyIndex) -subSelectAddrCount (Entity ai acc) addrType = - sub_select $ from $ \x -> do - where_ ( x ^. WalletAddrAccount ==. val ai - &&. x ^. WalletAddrType ==. val addrType - ) - let gap = val $ accountGap acc - return $ case_ - [ when_ (countRows >. gap) - then_ (countRows -. 
gap) - ] (else_ $ val 0) - -validMultisigParams :: Int -> Int -> Bool -validMultisigParams m n = n >= 1 && n <= 15 && m >= 1 && m <= n - -validAccountType :: AccountType -> Bool -validAccountType t = case t of - AccountRegular -> True - AccountMultisig m n -> validMultisigParams m n - -isMultisigAccount :: Account -> Bool -isMultisigAccount acc = case accountType acc of - AccountRegular -> False - AccountMultisig{} -> True - -isReadAccount :: Account -> Bool -isReadAccount = isNothing . accountMaster - -isCompleteAccount :: Account -> Bool -isCompleteAccount acc = case accountType acc of - AccountRegular -> length (accountKeys acc) == 1 - AccountMultisig _ n -> length (accountKeys acc) == n - diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Block.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Block.hs deleted file mode 100644 index f71b07b7..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Block.hs +++ /dev/null @@ -1,59 +0,0 @@ -module Network.Haskoin.Wallet.Block where - -import Control.Exception (throw) -import Control.Monad.Catch (MonadThrow, throwM) -import Control.Monad.Trans (MonadIO) -import Data.Maybe (fromMaybe) -import Database.Persist.Sql (SqlPersistT) -import Network.Haskoin.Block -import Network.Haskoin.Node.HeaderTree -import Network.Haskoin.Wallet.Model -import Network.Haskoin.Wallet.Transaction -import Network.Haskoin.Wallet.Types - -mainChain :: (MonadIO m, MonadThrow m) - => Either BlockHeight BlockHash - -> ListRequest - -> SqlPersistT m (ListResult NodeBlock) -mainChain blockE ListRequest{..} = do - bestHash <- fst <$> walletBestBlock - bestM <- getBlockByHash bestHash - best <- maybe (throwM $ WalletException "Could not find wallet best block") - return bestM - remoteNode <- case blockE of - Right h -> do - remoteNodeM <- getBlockByHash h - maybe (throwM $ WalletException "Could not get remote node") - return remoteNodeM - Left h -> do - heightNodeM <- getBlockByHeight best h - maybe (throwM $ WalletException "Could not find block height") - return heightNodeM - frst <- (+1) . 
nodeBlockHeight <$> splitBlock best remoteNode - if nodeBlockHeight best < frst - then return $ ListResult [] 0 - else do - let cnt = nodeBlockHeight best - frst - limit = min listLimit (cnt - listOffset) - offset = - if listReverse - then cnt - listOffset - limit - else listOffset - nodes <- getBlocksFromHeight best limit (frst + offset) - return $ ListResult nodes cnt - -blockTxs :: [NodeBlock] -> [WalletTx] -> [(NodeBlock, [WalletTx])] -blockTxs blocks transactions = reverse $ go [] blocks transactions - where - go bs [] _ = bs - go bs (n:ns) [] = go ((n,[]):bs) ns [] - go [] (n:ns) xs = go [(n,[])] ns xs - go (b:bs) (n:ns) (x:xs) - | nodeHash (fst b) == blockHashOf x = - go ((fst b, x : snd b) : bs) (n:ns) xs - | nodeHash n == blockHashOf x = - go ((n, [x]) : b : bs) ns xs - | otherwise = go ((n, []) : b : bs) ns (x:xs) - blockHashOf t = fromMaybe - (throw $ WalletException "Unexpected unconfirmed transaction") - (walletTxConfirmedBy t) diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Client.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Client.hs deleted file mode 100644 index e19ed466..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Client.hs +++ /dev/null @@ -1,265 +0,0 @@ -module Network.Haskoin.Wallet.Client (clientMain) where - -import System.FilePath ((</>)) -import System.Directory (createDirectoryIfMissing) -import System.Posix.Directory (changeWorkingDirectory) -import System.Posix.Files - ( setFileMode - , setFileCreationMask - , unionFileModes - , ownerModes - , groupModes - , otherModes - , fileExist - ) -import System.Environment (getArgs, lookupEnv) -import System.Info (os) -import System.Console.GetOpt - ( getOpt - , usageInfo - , OptDescr (Option) - , ArgDescr (NoArg, ReqArg) - , ArgOrder (Permute) - ) - -import Control.Monad (forM_) -import Control.Monad.Trans (liftIO) -import qualified Control.Monad.Reader as R (runReaderT) - -import Data.Default (def) -import Data.FileEmbed (embedFile) -import Data.Yaml (decodeFileEither) -import Data.String.Conversions (cs) - -import Network.Haskoin.Constants -import Network.Haskoin.Wallet.Settings -import Network.Haskoin.Wallet.Client.Commands -import Network.Haskoin.Wallet.Types - -import System.Exit (exitFailure) -import System.IO (hPutStrLn, stderr) -import System.FilePath.Posix (isAbsolute) - -usageHeader :: String -usageHeader = "Usage: hw [<options>] <command> [<args>]" - -cmdHelp :: [String] -cmdHelp = lines $ cs $ $(embedFile "config/help") - -warningMsg :: String -warningMsg = unwords - [ "!!!", "This software is experimental." - , "Use only small amounts of Bitcoins.", "!!!" - ] - -usage :: [String] -usage = warningMsg : usageInfo usageHeader options : cmdHelp - -read' :: Read x => String -> String -> x -read' e s = case reads s of - [(x, "")] -> x - _ -> error e - -options :: [OptDescr (Config -> Config)] -options = - [ Option "c" ["count"] - ( ReqArg - (\s cfg -> cfg { configCount = read' "Could not parse count" s }) - "INT" - ) $ "Items per page. Default: " ++ show (configCount def) - , Option "m" ["minconf"] - ( ReqArg - (\s cfg -> - cfg { configMinConf = read' "Could not parse minconf" s } - ) "INT" - ) $ "Minimum confirmations. Default: " - ++ show (configMinConf def) - , Option "f" ["fee"] - ( ReqArg - (\s cfg -> cfg { configFee = read' "Could not parse fee" s }) - "INT" - ) $ "Fee per kilobyte. Default: " ++ show (configFee def) - , Option "R" ["rcptfee"] - (NoArg $ \cfg -> cfg { configRcptFee = True }) $ - "Recipient pays fee. 
Default: " ++ show (configRcptFee def) - , Option "S" ["nosig"] - (NoArg $ \cfg -> cfg { configSignTx = False }) $ - "Do not sign. Default: " ++ show (not $ configSignTx def) - , Option "i" ["internal"] - (NoArg $ \cfg -> cfg { configAddrType = AddressInternal }) $ - "Internal addresses. Default: " - ++ show (configAddrType def == AddressInternal) - , Option "k" ["pubkeys"] - (NoArg $ \cfg -> cfg { configDisplayPubKeys = True }) $ - "Display public keys instead of addresses. Default: " - ++ show (configDisplayPubKeys def) - , Option "o" ["offline"] - (NoArg $ \cfg -> cfg { configOffline = True }) $ - "Offline balance. Default: " ++ show (configOffline def) - , Option "e" ["entropy"] - ( ReqArg ( \s cfg -> cfg { configEntropy = - read' "Could not parse entropy" s - } - ) "INT" - ) $ "Entropy in Bytes between 16 and 32. Default: " - ++ show (configEntropy def) - , Option "r" ["revpage"] - (NoArg $ \cfg -> cfg { configReversePaging = True }) $ - "Reverse paging. Default: " - ++ show (configReversePaging def) - , Option "I" ["index"] - ( ReqArg ( \s cfg -> cfg { configDerivIndex = - read' "Could not parse index" s - } - ) "INT" - ) $ "Derivation index for new accounts. Default: " - ++ show (configDerivIndex def) - , Option "j" ["json"] - (NoArg $ \cfg -> cfg { configFormat = OutputJSON }) - "Output JSON" - , Option "y" ["yaml"] - (NoArg $ \cfg -> cfg { configFormat = OutputYAML }) - "Output YAML" - , Option "s" ["socket"] - (ReqArg (\s cfg -> cfg { configConnect = s }) "URI") $ - "Server socket. Default: " ++ configConnect def - , Option "d" ["detach"] - (NoArg $ \cfg -> cfg { configDetach = True }) $ - "Detach server. Default: " ++ show (configDetach def) - , Option "t" ["testnet"] - (NoArg $ \cfg -> cfg { configTestnet = True }) "Testnet3 network" - , Option "g" ["config"] - (ReqArg (\s cfg -> cfg { configFile = s }) "FILE") $ - "Config file. Default: " ++ configFile def - , Option "w" ["workdir"] - (ReqArg (\s cfg -> cfg { configDir = s }) "DIR") - "Working directory. 
OS-dependent default" - , Option "v" ["verbose"] - (NoArg $ \cfg -> cfg { configVerbose = True }) "Verbose output" - ] - --- Create and change current working directory -setWorkDir :: Config -> IO () -setWorkDir cfg = do - let workDir = configDir cfg </> networkName - _ <- setFileCreationMask $ otherModes `unionFileModes` groupModes - createDirectoryIfMissing True workDir - setFileMode workDir ownerModes - changeWorkingDirectory workDir - --- Build application configuration -getConfig :: [Config -> Config] -> IO Config -getConfig fs = do - -- Create initial configuration from defaults and command-line arguments - let initCfg = foldr ($) def fs - - -- If working directory set in initial configuration, use it - dir <- case configDir initCfg of "" -> appDir - d -> return d - - -- Make configuration file relative to working directory - let cfgFile = if isAbsolute (configFile initCfg) - then configFile initCfg - else dir </> configFile initCfg - - -- Get configuration from file, if it exists - e <- fileExist cfgFile - if e then do - cfgE <- decodeFileEither cfgFile - case cfgE of - Left x -> error $ show x - -- Override settings from file using command-line - Right cfg -> return $ fixConfigDir (foldr ($) cfg fs) dir - else return $ fixConfigDir initCfg dir - where - -- If working directory not set, use default - fixConfigDir cfg dir = case configDir cfg of "" -> cfg{ configDir = dir } - _ -> cfg - - -clientMain :: IO () -clientMain = getArgs >>= \args -> case getOpt Permute options args of - (fs, commands, []) -> do - cfg <- getConfig fs - if configTestnet cfg - then setTestnet - else setProdnet - setWorkDir cfg - dispatchCommand cfg commands - (_, _, msgs) -> forM_ (msgs ++ usage) putStrLn - -dispatchCommand :: Config -> [String] -> IO () -dispatchCommand cfg args = flip R.runReaderT cfg $ case args of - "start" : [] -> cmdStart - "stop" : [] -> cmdStop - "newacc" : name : [] -> cmdNewAcc False name [] - "newread" : name : [] -> cmdNewAcc True name [] - "newms" : name : m : n : [] -> cmdNewAcc False name [m, n] - "newreadms" : name : m : n : [] -> cmdNewAcc True name [m, n] - "addkey" : name : [] -> cmdAddKey name - "setgap" : name : gap : [] -> cmdSetGap name gap - "account" : name : [] -> cmdAccount name - "accounts" : page -> cmdAccounts page - "rename" : name : new : [] -> cmdRenameAcc name new - "list" : name : page -> cmdList name page - "unused" : name : page -> cmdUnused name page - "label" : name : index : label : [] -> cmdLabel name index label - "uri" : name : index : ls -> cmdURI name index ls - "txs" : name : page -> cmdTxs name page - "addrtxs" : name : index : page -> cmdAddrTxs name index page - "getindex" : name : key : [] -> cmdGetIndex name key - "genaddrs" : name : i : [] -> cmdGenAddrs name i - "send" : name : add : amnt : [] -> cmdSend name add amnt - "sendmany" : name : xs -> cmdSendMany name xs - "import" : name : [] -> cmdImport name - "sign" : name : txid : [] -> cmdSign name txid - "gettx" : name : txid : [] -> cmdGetTx name txid - "balance" : name : [] -> cmdBalance name - "getoffline" : name : txid : [] -> cmdGetOffline name txid - "signoffline" : name : [] -> cmdSignOffline name - "rescan" : rescantime -> cmdRescan rescantime - "deletetx" : txid : [] -> cmdDeleteTx txid - "sync" : name : block : page -> cmdSync name block page - "pending" : name : page -> cmdPending name page - "dead" : name : page -> cmdDead name page - "monitor" : name -> cmdMonitor name - "decodetx" : [] -> cmdDecodeTx - "dice" : rolls : [] -> cmdDice rolls - "status" : [] -> cmdStatus - "keypair" : [] 
-> cmdKeyPair - "blockinfo" : hashes -> cmdBlockInfo hashes - "version" : [] -> cmdVersion - "help" : [] -> liftIO $ forM_ usage (hPutStrLn stderr) - [] -> liftIO $ forM_ usage (hPutStrLn stderr) - _ -> liftIO $ - forM_ ("Invalid command" : usage) (hPutStrLn stderr) >> exitFailure - -appDir :: IO FilePath -appDir = case os of "mingw" -> windows - "mingw32" -> windows - "mingw64" -> windows - "darwin" -> osx - "linux" -> unix - _ -> unix - where - windows = do - localAppData <- lookupEnv "LOCALAPPDATA" - dirM <- case localAppData of - Nothing -> lookupEnv "APPDATA" - Just l -> return $ Just l - case dirM of - Just d -> return $ d </> "Haskoin Wallet" - Nothing -> return "." - osx = do - homeM <- lookupEnv "HOME" - case homeM of - Just home -> return $ home </> "Library" - </> "Application Support" - </> "Haskoin Wallet" - Nothing -> return "." - unix = do - homeM <- lookupEnv "HOME" - case homeM of - Just home -> return $ home </> ".hw" - Nothing -> return "." - diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Client/Commands.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Client/Commands.hs deleted file mode 100644 index 610ab263..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Client/Commands.hs +++ /dev/null @@ -1,1023 +0,0 @@ -module Network.Haskoin.Wallet.Client.Commands -( cmdStart -, cmdStop -, cmdNewAcc -, cmdAddKey -, cmdSetGap -, cmdAccount -, cmdRenameAcc -, cmdAccounts -, cmdList -, cmdUnused -, cmdLabel -, cmdURI -, cmdTxs -, cmdAddrTxs -, cmdGetIndex -, cmdGenAddrs -, cmdSend -, cmdSendMany -, cmdImport -, cmdSign -, cmdBalance -, cmdGetTx -, cmdGetOffline -, cmdSignOffline -, cmdRescan -, cmdDecodeTx -, cmdVersion -, cmdStatus -, cmdBlockInfo -, cmdMonitor -, cmdSync -, cmdKeyPair -, cmdDeleteTx -, cmdPending -, cmdDead -, cmdDice -, decodeBase6 -, diceToEntropy -, diceToMnemonic -, mixEntropy -) -where - -import Control.Concurrent.Async.Lifted (async, wait) -import Control.Monad -import qualified Control.Monad.Reader as R -import Control.Monad.Trans (liftIO) -import Data.Aeson -import Data.Bits (xor) -import qualified Data.ByteString as BS -import qualified Data.ByteString.Base64 as B64 -import qualified Data.ByteString.Char8 as B8 -import Data.Either (fromRight) -import Data.List (intercalate, - intersperse) -import Data.Maybe -import Data.Monoid ((<>)) -import Data.Restricted (rvalue) -import qualified Data.Serialize as S -import Data.String (fromString) -import Data.String.Conversions (cs) -import qualified Data.Text as T -import qualified Data.Time.Format as Time -import Data.Word (Word32, Word64) -import qualified Data.Yaml as YAML -import Network.Haskoin.Block -import Network.Haskoin.Constants -import Network.Haskoin.Crypto -import Network.Haskoin.Node.STM -import Network.Haskoin.Script -import Network.Haskoin.Transaction -import Network.Haskoin.Util -import Network.Haskoin.Wallet.Accounts (rootToAccKey) -import qualified Network.Haskoin.Wallet.Client.PrettyJson as JSON -import Network.Haskoin.Wallet.Server -import Network.Haskoin.Wallet.Settings -import Network.Haskoin.Wallet.Types -import qualified Network.URI.Encode as URI -import Numeric (readInt) -import qualified System.Console.Haskeline as Haskeline -import System.Entropy (getEntropy) -import System.IO (stderr) -import System.ZMQ4 -import Text.Read (readMaybe) - -type Handler = R.ReaderT Config IO - -accountExists :: String -> Handler Bool -accountExists name = do - resE <- sendZmq (AccountReq $ T.pack name) - case (resE :: Either String (WalletResponse JsonAccount)) of - Right (ResponseValid _) -> return True - Right
(ResponseError _) -> return False - Left err -> error err - -accountKeyExists :: String -> Handler Bool -accountKeyExists name = do - accM <- parseResponse <$> sendZmq (AccountReq $ T.pack name) - return $ isJust $ jsonAccountMaster =<< accM - -data ParsedKey = ParsedXPrvKey !XPrvKey - | ParsedXPubKey !XPubKey - | ParsedMnemonic !Mnemonic !Passphrase !XPrvKey - | ParsedNothing - -askMnemonicOrKey :: String -> Handler ParsedKey -askMnemonicOrKey msg = - go . cs =<< askInput msg - where - go "" = return ParsedNothing - go str = case xPrvImport str of - Just k -> return $ ParsedXPrvKey k - _ -> case xPubImport str of - Just p -> return $ ParsedXPubKey p - -- This first check is just to verify if the mnemonic parses - _ -> case mnemonicToSeed "" str of - Right _ -> do - pass <- cs <$> askPassword - let seed = fromRight (error "Could not decode mnemonic seed") $ - mnemonicToSeed (cs pass) str - return $ ParsedMnemonic str pass (makeXPrvKey seed) - _ -> error "Could not parse mnemonic or extended key" - -askInput :: String -> Handler String -askInput msg = do - inputM <- liftIO $ - Haskeline.runInputT Haskeline.defaultSettings $ - Haskeline.getPassword (Just '*') msg - return $ fromMaybe err inputM - where - err = error "No action due to EOF" - -askPassword :: Handler String -askPassword = do - pass <- askInput "Mnemonic password or leave empty: " - unless (null pass) $ do - pass2 <- askInput "Enter your mnemonic password again: " - when (pass /= pass2) $ error "Passwords do not match" - return pass - -askSigningKeys :: String -> Handler (Maybe XPrvKey) -askSigningKeys name = - -- Only ask for signing keys if the account doesn't have one already - go =<< accountKeyExists name - where - go True = return Nothing - go _ = do - input <- askMnemonicOrKey "Enter mnemonic or extended private key: " - case input of - ParsedXPrvKey k -> return $ Just k - ParsedMnemonic _ _ k -> return $ Just k - _ -> error "Need a private key to sign" - --- hw start [config] [--detach] -cmdStart :: Handler () -cmdStart = do - cfg <- R.ask - liftIO $ runSPVServer cfg - --- hw stop [config] -cmdStop :: Handler () -cmdStop = do - sendZmq StopServerReq >>= (`handleResponse` (\() -> return ())) - liftIO $ putStrLn "Process stopped" - --- First argument: is account read-only? -cmdNewAcc :: Bool -> String -> [String] -> Handler () -cmdNewAcc readOnly name ls = do - e <- R.asks configEntropy - d <- R.asks configDerivIndex - _ <- return $! typ - accountExists name >>= (`when` error "Account exists") - (masterM, keyM, mnemonicM, passM) <- go =<< askMnemonicOrKey - "Enter mnemonic, extended key or leave empty to generate: " - let newAcc = NewAccount - { newAccountName = T.pack name - , newAccountType = typ - , newAccountMnemonic = cs <$> mnemonicM - , newAccountPassword = cs <$> passM - , newAccountEntropy = Just e - , newAccountMaster = masterM - , newAccountDeriv = Just d - , newAccountKeys = maybeToList keyM - , newAccountReadOnly = readOnly - } - resE <- sendZmq $ NewAccountReq newAcc - handleResponse resE $ liftIO . putStr . 
printAccount - where - go (ParsedXPrvKey k) = return (Just k, Nothing, Nothing, Nothing) - go (ParsedXPubKey p) = return (Nothing, Just p, Nothing, Nothing) - go (ParsedMnemonic m x _) = return (Nothing, Nothing, Just m, Just x) - go ParsedNothing = do - pass <- cs <$> askPassword - return (Nothing, Nothing, Nothing, Just pass) - typ = case ls of - [] -> AccountRegular - [mS, nS] -> fromMaybe (error "Account information incorrect") $ do - m <- readMaybe mS - n <- readMaybe nS - return $ AccountMultisig m n - _ -> error "Number of parametres incorrect" - -cmdAddKey :: String -> Handler () -cmdAddKey name = do - d <- R.asks configDerivIndex - accountExists name >>= (`unless` error "Account does not exist") - let msg = "Enter mnemonic or extended private key: " - key <- askMnemonicOrKey msg >>= \pk -> return $ case pk of - ParsedXPrvKey k -> deriveXPubKey $ rootToAccKey k d - ParsedMnemonic _ _ k -> deriveXPubKey $ rootToAccKey k d - ParsedXPubKey p -> p - ParsedNothing -> error "Invalid empty input" - resE <- sendZmq (AddPubKeysReq (T.pack name) [key]) - handleResponse resE $ liftIO . putStr . printAccount - -cmdSetGap :: String -> String -> Handler () -cmdSetGap name gap = do - resE <- sendZmq (SetAccountGapReq (T.pack name) newGap) - handleResponse resE $ liftIO . putStr . printAccount - where - newGap = read gap - -cmdAccount :: String -> Handler () -cmdAccount name = do - resE <- sendZmq (AccountReq $ T.pack name) - handleResponse resE $ liftIO . putStr . printAccount - -cmdAccounts :: [String] -> Handler () -cmdAccounts ls = do - let page = fromMaybe 1 $ listToMaybe ls >>= readMaybe - listAction page AccountsReq $ \ts -> do - let xs = map (liftIO . putStr . printAccount) ts - sequence_ $ intersperse (liftIO $ putStrLn "-") xs - -cmdRenameAcc :: String -> String -> Handler () -cmdRenameAcc oldName newName = do - resE <- sendZmq $ RenameAccountReq (T.pack oldName) (T.pack newName) - handleResponse resE $ liftIO . putStr . printAccount - -listAction :: (FromJSON a, ToJSON a) - => Word32 - -> (ListRequest -> WalletRequest) - -> ([a] -> Handler ()) - -> Handler () -listAction page requestBuilder action = do - c <- R.asks configCount - r <- R.asks configReversePaging - case c of - 0 -> do - let listReq = ListRequest 0 0 r - resE <- sendZmq (requestBuilder listReq) - handleResponse resE $ \(ListResult a _) -> action a - _ -> do - when (page < 1) $ error "Page cannot be less than 1" - let listReq = ListRequest ((page - 1) * c) c r - resE <- sendZmq (requestBuilder listReq) - handleResponse resE $ \(ListResult a m) -> case m of - 0 -> liftIO . putStrLn $ "No elements" - _ -> do - liftIO . putStrLn $ - "Page " ++ show page ++ " of " ++ show (pages m c) ++ - " (" ++ show m ++ " elements)" - action a - where - pages m c | m `mod` c == 0 = m `div` c - | otherwise = m `div` c + 1 - -cmdList :: String -> [String] -> Handler () -cmdList name ls = do - t <- R.asks configAddrType - m <- R.asks configMinConf - o <- R.asks configOffline - p <- R.asks configDisplayPubKeys - let page = fromMaybe 1 $ listToMaybe ls >>= readMaybe - f = AddrsReq (T.pack name) t m o - listAction page f $ \as -> forM_ as (liftIO . putStrLn . printAddress p) - -cmdUnused :: String -> [String] -> Handler () -cmdUnused name ls = do - t <- R.asks configAddrType - p <- R.asks configDisplayPubKeys - let page = fromMaybe 1 $ listToMaybe ls >>= readMaybe - f = UnusedAddrsReq (T.pack name) t - listAction page f $ \as -> forM_ (as :: [JsonAddr]) $ - liftIO . putStrLn . 
printAddress p - -cmdLabel :: String -> String -> String -> Handler () -cmdLabel name iStr label = do - t <- R.asks configAddrType - p <- R.asks configDisplayPubKeys - resE <- sendZmq (SetAddrLabelReq (T.pack name) i t addrLabel) - handleResponse resE $ liftIO . putStrLn . printAddress p - where - i = read iStr - addrLabel = T.pack label - -cmdTxs :: String -> [String] -> Handler () -cmdTxs name ls = do - let page = fromMaybe 1 $ listToMaybe ls >>= readMaybe - r <- R.asks configReversePaging - listAction page (TxsReq (T.pack name)) $ \ts -> do - let xs = map (liftIO . putStr . printTx Nothing) ts - xs' = if r then xs else reverse xs - sequence_ $ intersperse (liftIO $ putStrLn "-") xs' - -cmdPending :: String -> [String] -> Handler () -cmdPending name ls = do - let page = fromMaybe 1 $ listToMaybe ls >>= readMaybe - r <- R.asks configReversePaging - listAction page (PendingTxsReq (T.pack name)) $ \ts -> do - let xs = map (liftIO . putStr . printTx Nothing) ts - xs' = if r then xs else reverse xs - sequence_ $ intersperse (liftIO $ putStrLn "-") xs' - -cmdDead :: String -> [String] -> Handler () -cmdDead name ls = do - let page = fromMaybe 1 $ listToMaybe ls >>= readMaybe - r <- R.asks configReversePaging - listAction page (DeadTxsReq (T.pack name)) $ \ts -> do - let xs = map (liftIO . putStr . printTx Nothing) ts - xs' = if r then xs else reverse xs - sequence_ $ intersperse (liftIO $ putStrLn "-") xs' - -cmdAddrTxs :: String -> String -> [String] -> Handler () -cmdAddrTxs name i ls = do - t <- R.asks configAddrType - m <- R.asks configMinConf - o <- R.asks configOffline - r <- R.asks configReversePaging - let page = fromMaybe 1 $ listToMaybe ls >>= readMaybe - f = AddrTxsReq (T.pack name) index t - resE <- sendZmq (AddressReq (T.pack name) index t m o) - handleResponse resE $ \JsonAddr{..} -> listAction page f $ \ts -> do - let xs = map (liftIO . putStr . printTx (Just jsonAddrAddress)) ts - xs' = if r then xs else reverse xs - sequence_ $ intersperse (liftIO $ putStrLn "-") xs' - where - index = fromMaybe (error "Could not read index") $ readMaybe i - -cmdGetIndex :: String -> String -> Handler () -cmdGetIndex name k = do - t <- R.asks configAddrType - resE <- sendZmq $ PubKeyIndexReq (T.pack name) (fromString k) t - handleResponse resE go - where - go :: [JsonAddr] -> Handler () - go [] = liftIO $ putStrLn $ "No matching pubkey found" - go as = mapM_ (liftIO . putStrLn . printAddress True) as - -cmdGenAddrs :: String -> String -> Handler () -cmdGenAddrs name i = do - t <- R.asks configAddrType - let req = GenerateAddrsReq (T.pack name) index t - resE <- sendZmq req - handleResponse resE $ \cnt -> liftIO . putStrLn $ - unwords [ "Generated", show (cnt :: Int), "addresses" ] - where - index = read i - --- Build a bitcoin payment request URI -cmdURI :: String -> String -> [String] -> Handler () -cmdURI name iStr ls = do - t <- R.asks configAddrType - resE <- sendZmq (AddressReq (T.pack name) i t 0 False) - case parseResponse resE of - Just a -> do - let uri = buildPaymentRequest (jsonAddrAddress a) ls - liftIO $ putStrLn $ cs uri - Nothing -> error "No address found" - where - i = read iStr - -buildPaymentRequest :: Address -> [String] -> BS.ByteString -buildPaymentRequest a ls = - "bitcoin:" <> addrToBase58 a <> cs params - where - params = if null paramStr then "" else "?" <> paramStr - paramStr = concat $ intersperse "&" $ zipWith ($) - [ ("amount" `buildParam`) . parseAmount - , ("message" `buildParam`) . 
URI.encode - ] ls - parseAmount str = show (read str :: Double) - buildParam str val = str <> "=" <> val - -cmdSend :: String -> String -> String -> Handler () -cmdSend name addrStr amntStr = cmdSendMany name [addrStr ++ ":" ++ amntStr] - -cmdSendMany :: String -> [String] -> Handler () -cmdSendMany name xs = case rcpsM of - Just rcps -> do - fee <- R.asks configFee - rcptFee <- R.asks configRcptFee - minconf <- R.asks configMinConf - sign <- R.asks configSignTx - masterM <- if sign then askSigningKeys name else return Nothing - let action = CreateTx rcps fee minconf rcptFee sign masterM - resE <- sendZmq (CreateTxReq (T.pack name) action) - handleResponse resE $ liftIO . putStr . printTx Nothing - _ -> error "Could not parse recipient information" - where - g str = map cs $ T.splitOn ":" (T.pack str) - f [a,v] = liftM2 (,) (base58ToAddr a) (readMaybe $ cs v) - f _ = Nothing - rcpsM = mapM (f . g) xs - -getHexTx :: Handler Tx -getHexTx = do - hexM <- Haskeline.runInputT Haskeline.defaultSettings $ - Haskeline.getInputLine "" - let txM = case hexM of - Nothing -> error "No action due to EOF" - Just hex -> eitherToMaybe . S.decode =<< decodeHex (cs hex) - case txM of - Just tx -> return tx - Nothing -> error "Could not parse transaction" - -cmdImport :: String -> Handler () -cmdImport name = do - tx <- getHexTx - resE <- sendZmq (ImportTxReq (T.pack name) tx) - handleResponse resE $ liftIO . putStr . printTx Nothing - -cmdSign :: String -> String -> Handler () -cmdSign name txidStr = case txidM of - Just txid -> do - masterM <- askSigningKeys name - resE <- sendZmq (SignTxReq (T.pack name) $ SignTx txid masterM) - handleResponse resE $ liftIO . putStr . printTx Nothing - _ -> error "Could not parse txid" - where - txidM = hexToTxHash $ cs txidStr - -cmdGetOffline :: String -> String -> Handler () -cmdGetOffline name tidStr = case tidM of - Just tid -> do - resE <- sendZmq (OfflineTxReq (T.pack name) tid) - handleResponse resE $ \otd -> - liftIO $ putStrLn $ cs $ - B64.encode $ S.encode (otd :: OfflineTxData) - _ -> error "Could not parse txid" - where - tidM = hexToTxHash $ cs tidStr - -cmdSignOffline :: String -> Handler () -cmdSignOffline name = do - inputM <- Haskeline.runInputT Haskeline.defaultSettings $ - Haskeline.getInputLine "" - case S.decode =<< B64.decode . cs =<< maybeToEither "" inputM of - Right (OfflineTxData tx dat) -> do - masterM <- askSigningKeys name - resE <- sendZmq (SignOfflineTxReq (T.pack name) masterM tx dat) - handleResponse resE $ \(TxCompleteRes sTx _) -> - liftIO $ putStrLn $ cs $ encodeHex $ S.encode sTx - _ -> error "Could not decode input data" - -cmdBalance :: String -> Handler () -cmdBalance name = do - m <- R.asks configMinConf - o <- R.asks configOffline - resE <- sendZmq (BalanceReq (T.pack name) m o) - handleResponse resE $ \bal -> - liftIO $ putStrLn $ unwords [ "Balance:", show (bal :: Word64) ] - -cmdGetTx :: String -> String -> Handler () -cmdGetTx name tidStr = case tidM of - Just tid -> do - resE <- sendZmq (TxReq (T.pack name) tid) - handleResponse resE $ liftIO . putStr . 
printTx Nothing - _ -> error "Could not parse txid" - where - tidM = hexToTxHash $ cs tidStr - -cmdRescan :: [String] -> Handler () -cmdRescan timeLs = do - let timeM = case timeLs of - [] -> Nothing - str:_ -> case readMaybe str of - Nothing -> error "Could not decode time" - Just t -> Just t - resE <- sendZmq (NodeActionReq $ NodeActionRescan timeM) - handleResponse resE $ \(RescanRes ts) -> - liftIO $ putStrLn $ unwords [ "Timestamp:", show ts] - -cmdDeleteTx :: String -> Handler () -cmdDeleteTx tidStr = case tidM of - Just tid -> do - resE <- sendZmq (DeleteTxReq tid) - handleResponse resE $ \() -> return () - Nothing -> error "Could not parse txid" - where - tidM = hexToTxHash $ cs tidStr - -cmdMonitor :: [String] -> Handler () -cmdMonitor ls = do - cfg@Config{..} <- R.ask - -- TODO: I can do this in the same thread without ^C twice (see sendZmq) - liftIO $ withContext $ \ctx -> withSocket ctx Sub $ \sock -> do - setLinger (restrict (0 :: Int)) sock - setupAuth cfg sock - connect sock configConnectNotif - subscribe sock "[block]" - forM_ ls $ \name -> subscribe sock $ "{" <> cs name <> "}" - forever $ do - [_,m] <- receiveMulti sock - handleNotif configFormat $ eitherDecode $ cs m - -cmdSync :: String -> String -> [String] -> Handler () -cmdSync acc block ls = do - let page = fromMaybe 1 $ listToMaybe ls >>= readMaybe - f = case length block of - 64 -> SyncReq (cs acc) $ - fromMaybe (error "Could not decode block id") $ - hexToBlockHash $ cs block - _ -> SyncHeightReq (cs acc) $ - fromMaybe (error "Could not decode block height") $ - readMaybe block - r <- R.asks configReversePaging - listAction page f $ \blocks -> do - let blocks' = if r then reverse blocks else blocks - forM_ (blocks' :: [JsonSyncBlock]) $ liftIO . putStrLn . printSyncBlock - -cmdDecodeTx :: Handler () -cmdDecodeTx = do - tx <- getHexTx - format <- R.asks configFormat - liftIO $ formatStr $ cs $ case format of - OutputJSON -> cs $ jsn tx - _ -> YAML.encode $ val tx - where - val = encodeTxJSON - jsn = JSON.encodePretty . val - -cmdVersion :: Handler () -cmdVersion = liftIO $ do - putStrLn $ unwords [ "network :", cs networkName ] - putStrLn $ unwords [ "user-agent:", cs haskoinUserAgent ] - -cmdStatus :: Handler () -cmdStatus = do - v <- R.asks configVerbose - resE <- sendZmq (NodeActionReq NodeActionStatus) - handleResponse resE $ mapM_ (liftIO . putStrLn) . printNodeStatus v - -cmdKeyPair :: Handler () -cmdKeyPair = do - (pub, sec) <- curveKeyPair - liftIO $ do - B8.putStrLn $ B8.unwords [ "public :", rvalue pub ] - B8.putStrLn $ B8.unwords [ "private:", rvalue sec ] - -cmdBlockInfo :: [String] -> Handler () -cmdBlockInfo headers = do - -- Show best block if no arguments are provided - hashL <- if null headers then - -- Fetch best block hash from status msg, and return as list - (: []) . parseRes <$> sendZmq (NodeActionReq NodeActionStatus) - else - return (map fromString headers) - sendZmq (BlockInfoReq hashL) >>= - \resE -> handleResponse resE (liftIO . printResults) - where - printResults :: [BlockInfo] -> IO () - printResults = mapM_ $ putStrLn . unlines . printBlockInfo - parseRes :: Either String (WalletResponse NodeStatus) -> BlockHash - parseRes = nodeStatusBestHeader . fromMaybe - (error "No response to NodeActionStatus msg") . parseResponse - --- Do not reuse a dice roll to generate mnemonics because: --- (D xor B) xor (D xor C) = (D xor D) xor (B xor C) = (B xor C) --- You will leak (B xor C) which is the xor of your computer entropy. --- In other words, don't reuse any part of a one time pad. 
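The xor identity referenced in the comment above can be sanity-checked in a few lines of Haskell. The snippet below is a standalone illustrative sketch, not part of the deleted module, and the byte values are arbitrary: it shows that anyone who observes both mixed seeds (D xor B) and (D xor C) can recover (B xor C) without ever learning the reused dice entropy D.

import Data.Bits (xor)

-- (d `xor` b) `xor` (d `xor` c) == (d `xor` d) `xor` (b `xor` c) == b `xor` c
leakWithoutDice :: Bool
leakWithoutDice =
    let d = 0xA5 :: Int   -- reused dice entropy
        b = 0x3C          -- machine entropy behind the first mnemonic
        c = 0x71          -- machine entropy behind the second mnemonic
    in ((d `xor` b) `xor` (d `xor` c)) == (b `xor` c)

main :: IO ()
main = print leakWithoutDice  -- prints True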
-cmdDice :: String -> Handler () -cmdDice rolls = case diceToEntropy rolls of - Right ent1 -> do - -- Get more entropy from /dev/urandom - ent2 <- liftIO $ getEntropy 32 - -- Mix the entropy using xor and generate a mnemonic - case toMnemonic $ mixEntropy ent1 ent2 of - Right ms -> liftIO $ putStrLn $ cs ms - Left err -> error err - Left err -> error err - --- Mix entropy of same length by xoring them -mixEntropy :: BS.ByteString -> BS.ByteString -> BS.ByteString -mixEntropy ent1 ent2 - | BS.length ent1 == BS.length ent2 = BS.pack $ BS.zipWith xor ent1 ent2 - | otherwise = error "Entropy is not of the same length" - -diceToMnemonic :: String -> Either String BS.ByteString -diceToMnemonic = toMnemonic <=< diceToEntropy - --- Transform 99 dice rolls (255.9 bits of entropy) into zero padded 32 bytes -diceToEntropy :: String -> Either String BS.ByteString -diceToEntropy rolls - | length rolls /= 99 = Left "99 dice rolls are required" - | otherwise = do - ent <- maybeToEither "Could not decode base6" $ decodeBase6 $ cs rolls - -- This check should probably never trigger - when (BS.length ent > 32) $ Left "Invalid entropy length" - let z = BS.replicate (32 - BS.length ent) 0x00 - return $ BS.append z ent - -b6Data :: BS.ByteString -b6Data = "612345" - -b6' :: Char -> Maybe Int -b6' = flip B8.elemIndex b6Data - -decodeBase6 :: BS.ByteString -> Maybe BS.ByteString -decodeBase6 t - | BS.null t = Just BS.empty - | otherwise = integerToBS <$> decodeBase6I t - -decodeBase6I :: BS.ByteString -> Maybe Integer -decodeBase6I bs = case resM of - Just (i,[]) -> return i - _ -> Nothing - where - resM = listToMaybe $ readInt 6 (isJust . b6') f $ cs bs - f = fromMaybe (error "Could not decode base6") . b6' - -{- Helpers -} - -handleNotif :: OutputFormat -> Either String Notif -> IO () -handleNotif _ (Left e) = error e -handleNotif fmt (Right notif) = case fmt of - OutputJSON -> formatStr $ cs $ - JSON.encodePretty notif - OutputYAML -> do - putStrLn "---" - formatStr $ cs $ YAML.encode notif - putStrLn "..." - OutputNormal -> - putStrLn $ printNotif notif - -parseResponse - :: Either String (WalletResponse a) - -> Maybe a -parseResponse resE = case resE of - Right (ResponseValid resM) -> resM - Right (ResponseError err) -> error $ T.unpack err - Left err -> error err - -handleResponse - :: (FromJSON a, ToJSON a) - => Either String (WalletResponse a) - -> (a -> Handler ()) - -> Handler () -handleResponse resE handle = case parseResponse resE of - Just a -> formatOutput a =<< R.asks configFormat - Nothing -> return () - where - formatOutput a format = case format of - OutputJSON -> liftIO . formatStr $ cs $ - JSON.encodePretty a - OutputYAML -> liftIO . formatStr $ cs $ YAML.encode a - OutputNormal -> handle a - -sendZmq :: (FromJSON a, ToJSON a) - => WalletRequest - -> Handler (Either String (WalletResponse a)) -sendZmq req = do - cfg <- R.ask - let msg = cs $ encode req - when (configVerbose cfg) $ liftIO $ - B8.hPutStrLn stderr $ "Outgoing JSON: " `mappend` msg - -- TODO: If I do this in the same thread I have to ^C twice to exit - a <- async $ liftIO $ withContext $ \ctx -> - withSocket ctx Req $ \sock -> do - setLinger (restrict (0 :: Int)) sock - setupAuth cfg sock - connect sock (configConnect cfg) - send sock [] (cs $ encode req) - eitherDecode . 
cs <$> receive sock - wait a - -setupAuth :: (SocketType t) - => Config - -> Socket t - -> IO () -setupAuth cfg sock = do - let clientKeyM = configClientKey cfg - clientKeyPubM = configClientKeyPub cfg - serverKeyPubM = configServerKeyPub cfg - forM_ clientKeyM $ \clientKey -> do - let serverKeyPub = fromMaybe - (error "Server public key not provided") - serverKeyPubM - clientKeyPub = fromMaybe - (error "Client public key not provided") - clientKeyPubM - setCurveServerKey TextFormat serverKeyPub sock - setCurvePublicKey TextFormat clientKeyPub sock - setCurveSecretKey TextFormat clientKey sock - -formatStr :: String -> IO () -formatStr str = forM_ (lines str) putStrLn - -encodeTxJSON :: Tx -> Value -encodeTxJSON tx = object - [ "txid" .= (cs $ txHashToHex (txHash tx) :: T.Text) - , "version" .= txVersion tx - , "inputs" .= map encodeTxInJSON (txIn tx) - , "outputs" .= map encodeTxOutJSON (txOut tx) - , "locktime" .= txLockTime tx - ] - -encodeTxInJSON :: TxIn -> Value -encodeTxInJSON (TxIn o s i) = object $ - [ "outpoint" .= encodeOutPointJSON o - , "sequence" .= i - , "raw-script" .= (cs $ encodeHex s :: T.Text) - , "script" .= encodeScriptJSON sp - ] ++ decoded - where - sp = either (const $ Script []) id $ S.decode s - decoded = either (const []) f $ decodeInputBS s - f inp = ["decoded-script" .= encodeScriptInputJSON inp] - -encodeTxOutJSON :: TxOut -> Value -encodeTxOutJSON (TxOut v s) = object $ - [ "value" .= v - , "raw-script" .= (cs $ encodeHex s :: T.Text) - , "script" .= encodeScriptJSON sp - ] ++ decoded - where - sp = either (const $ Script []) id $ S.decode s - decoded = either (const []) - (\out -> ["decoded-script" .= encodeScriptOutputJSON out]) - (decodeOutputBS s) - -encodeOutPointJSON :: OutPoint -> Value -encodeOutPointJSON (OutPoint h i) = object - [ "txid" .= (cs $ txHashToHex h :: T.Text) - , "pos" .= i - ] - -encodeScriptJSON :: Script -> Value -encodeScriptJSON (Script ops) = - toJSON $ map f ops - where - f (OP_PUSHDATA bs _) = String $ T.pack $ unwords - ["OP_PUSHDATA", cs $ encodeHex bs] - f x = String $ T.pack $ show x - -encodeScriptInputJSON :: ScriptInput -> Value -encodeScriptInputJSON si = case si of - RegularInput (SpendPK s) -> object - [ "spendpubkey" .= object [ "sig" .= encodeSigJSON s ] ] - RegularInput (SpendPKHash s p) -> object - [ "spendpubkeyhash" .= object - [ "sig" .= encodeSigJSON s - , "pubkey" .= (cs $ encodeHex (S.encode p) :: T.Text) - , "sender-address" .= (cs $ addrToBase58 (pubKeyAddr p) :: T.Text) - ] - ] - RegularInput (SpendMulSig sigs) -> object - [ "spendmulsig" .= object [ "sigs" .= map encodeSigJSON sigs ] ] - ScriptHashInput s r -> object - [ "spendscripthash" .= object - [ "scriptinput" .= encodeScriptInputJSON (RegularInput s) - , "redeem" .= encodeScriptOutputJSON r - , "raw-redeem" .= (cs $ encodeHex (encodeOutputBS r) :: T.Text) - , "sender-address" .= (cs $ addrToBase58 (p2shAddr r) :: T.Text) - ] - ] - -encodeScriptOutputJSON :: ScriptOutput -> Value -encodeScriptOutputJSON so = - case so of - PayPK p -> - object - [ "pay2pubkey" .= - object ["pubkey" .= (cs $ encodeHex (S.encode p) :: T.Text)] - ] - PayPKHash h -> - object - [ "pay2pubkeyhash" .= - object - [ "address-hex" .= (cs $ encodeHex (S.encode h) :: T.Text) - , "address-base58" .= - (cs (addrToBase58 (PubKeyAddress h)) :: T.Text) - ] - ] - PayMulSig ks r -> - object - [ "pay2mulsig" .= - object - [ "required-keys" .= r - , "pubkeys" .= - (map (cs . encodeHex . 
S.encode) ks :: [T.Text]) - ] - ] - PayScriptHash h -> - object - [ "pay2scripthash" .= - object - [ "address-hex" .= (cs $ encodeHex $ S.encode h :: T.Text) - , "address-base58" .= - (cs (addrToBase58 (ScriptAddress h)) :: T.Text) - ] - ] - DataCarrier bs -> - object - [ "op_return" .= - object ["data" .= (cs $ encodeHex bs :: T.Text)] - ] - -encodeSigJSON :: TxSignature -> Value -encodeSigJSON ts@(TxSignature _ sh) = object - [ "raw-sig" .= (cs $ encodeHex (encodeSig ts) :: T.Text) - , "sighash" .= encodeSigHashJSON sh - ] - -encodeSigHashJSON :: SigHash -> Value -encodeSigHashJSON sh = case sh of - SigAll acp -> object - [ "type" .= String "SigAll" - , "acp" .= acp - ] - SigNone acp -> object - [ "type" .= String "SigNone" - , "acp" .= acp - ] - SigSingle acp -> object - [ "type" .= String "SigSingle" - , "acp" .= acp - ] - SigUnknown acp v -> object - [ "type" .= String "SigUnknown" - , "acp" .= acp - , "value" .= v - ] - -{- Print utilities -} - -printAccount :: JsonAccount -> String -printAccount JsonAccount{..} = unlines $ - [ "Account : " ++ T.unpack jsonAccountName - , "Type : " ++ showType - , "Gap : " ++ show jsonAccountGap - ] - ++ - [ "Index : " ++ show i | i <- childLs ] - ++ - [ "Mnemonic: " ++ cs ms - | ms <- maybeToList jsonAccountMnemonic - ] - ++ - concat [ printKeys | not (null jsonAccountKeys) ] - where - childLs = case jsonAccountType of - AccountRegular -> map xPubChild jsonAccountKeys - _ -> maybeToList $ xPrvChild <$> jsonAccountMaster - printKeys = - ("Keys : " ++ cs (xPubExport (head jsonAccountKeys))) : - map ((" " ++) . cs . xPubExport) (tail jsonAccountKeys) - showType = case jsonAccountType of - AccountRegular -> if isNothing jsonAccountMaster - then "Read-Only" else "Regular" - AccountMultisig m n -> unwords - [ if isNothing jsonAccountMaster - then "Read-Only Multisig" else "Multisig" - , show m, "of", show n - ] - -printAddress :: Bool -> JsonAddr -> String -printAddress displayPubKey JsonAddr{..} = unwords $ - [ show jsonAddrIndex, ":", cs dat ] - ++ - [ "(" ++ T.unpack jsonAddrLabel ++ ")" - | not (null $ T.unpack jsonAddrLabel) - ] - ++ concat - [ [ "[Received: " ++ show (balanceInfoInBalance bal) ++ "]" - , "[Coins: " ++ show (balanceInfoCoins bal) ++ "]" - , "[Spent Coins: " ++ show (balanceInfoSpentCoins bal) ++ "]" - ] - | isJust jsonAddrBalance && balanceInfoCoins bal > 0 - ] - where - dat | displayPubKey = - maybe "" (encodeHex . S.encode) jsonAddrKey - | otherwise = addrToBase58 jsonAddrAddress - bal = fromMaybe (error "Could not get address balance") jsonAddrBalance - -printNotif :: Notif -> String -printNotif (NotifTx tx) = printTx Nothing tx -printNotif (NotifBlock b) = printBlock b - -printTx :: Maybe Address -> JsonTx -> String -printTx aM tx@JsonTx{..} = unlines $ - [ "Id : " ++ cs (txHashToHex jsonTxHash) ] - ++ - [ "Value : " ++ printTxType jsonTxType ++ " " ++ show jsonTxValue ] - ++ - [ "Confidence : " ++ printTxConfidence tx ] - ++ concat - [ printAddrInfos "Inputs : " jsonTxInputs - | not (null jsonTxInputs) - ] - ++ concat - [ printAddrInfos "Outputs : " jsonTxOutputs - | not (null jsonTxOutputs) - ] - ++ concat - [ printAddrInfos "Change : " jsonTxChange - | not (null jsonTxChange) - ] - where - printAddrInfos header xs = - (header ++ f (head xs)) : - map ((" " ++) . 
f) (tail xs) - f (AddressInfo addr valM local) = unwords $ - cs (addrToBase58 addr) : - [ show v | v <- maybeToList valM ] - ++ - [ "<-" | maybe local (== addr) aM ] - -printTxConfidence :: JsonTx -> String -printTxConfidence JsonTx{..} = case jsonTxConfidence of - TxBuilding -> "Building" ++ confirmations - TxPending -> "Pending" ++ confirmations - TxDead -> "Dead" ++ confirmations - TxOffline -> "Offline" - where - confirmations = case jsonTxConfirmations of - Just conf -> " (Confirmations: " ++ show conf ++ ")" - _ -> "" - -printTxType :: TxType -> String -printTxType t = case t of - TxIncoming -> "Incoming" - TxOutgoing -> "Outgoing" - TxSelf -> "Self" - -printBlock :: JsonBlock -> String -printBlock JsonBlock{..} = unlines - [ "Block Hash : " ++ cs (blockHashToHex jsonBlockHash) - , "Block Height : " ++ show jsonBlockHeight - , "Previous block : " ++ cs (blockHashToHex jsonBlockPrev) - ] - -printSyncBlock :: JsonSyncBlock -> String -printSyncBlock JsonSyncBlock{..} = unlines - [ "Block Hash : " ++ cs (blockHashToHex jsonSyncBlockHash) - , "Block Height : " ++ show jsonSyncBlockHeight - , "Previous block : " ++ cs (blockHashToHex jsonSyncBlockPrev) - , "Transactions : " ++ show (length jsonSyncBlockTxs) - ] - -printNodeStatus :: Bool -> NodeStatus -> [String] -printNodeStatus verbose NodeStatus{..} = - [ "Network Height : " ++ show nodeStatusNetworkHeight - , "Best Header : " ++ cs (blockHashToHex nodeStatusBestHeader) - , "Best Header Height: " ++ show nodeStatusBestHeaderHeight - , "Best Block : " ++ cs (blockHashToHex nodeStatusBestBlock) - , "Best Block Height : " ++ show nodeStatusBestBlockHeight - , "Bloom Filter Size : " ++ show nodeStatusBloomSize - ] ++ - [ "Header Peer : " ++ show h - | h <- maybeToList nodeStatusHeaderPeer, verbose - ] ++ - [ "Merkle Peer : " ++ show m - | m <- maybeToList nodeStatusMerklePeer, verbose - ] ++ - [ "Pending Headers : " ++ show nodeStatusHaveHeaders | verbose ] ++ - [ "Pending Tickles : " ++ show nodeStatusHaveTickles | verbose ] ++ - [ "Pending Txs : " ++ show nodeStatusHaveTxs | verbose ] ++ - [ "Pending GetData : " ++ show (map txHashToHex nodeStatusGetData) - | verbose - ] ++ - [ "Pending Rescan : " ++ show r - | r <- maybeToList nodeStatusRescan, verbose - ] ++ - [ "Synced Mempool : " ++ show nodeStatusMempool | verbose ] ++ - [ "HeaderSync Lock : " ++ show nodeStatusSyncLock | verbose ] ++ - [ "Peers: " ] ++ - intercalate ["-"] (map (printPeerStatus verbose) nodeStatusPeers) - -printPeerStatus :: Bool -> PeerStatus -> [String] -printPeerStatus verbose PeerStatus{..} = - [ " Peer Id : " ++ show peerStatusPeerId - , " Peer Host: " ++ peerHostString peerStatusHost - , " Connected: " ++ if peerStatusConnected then "yes" else "no" - , " Height : " ++ show peerStatusHeight - ] ++ - [ " Protocol : " ++ show p | p <- maybeToList peerStatusProtocol - ] ++ - [ " UserAgent: " ++ ua | ua <- maybeToList peerStatusUserAgent - ] ++ - [ " Avg Ping : " ++ p | p <- maybeToList peerStatusPing - ] ++ - [ " DoS Score: " ++ show d | d <- maybeToList peerStatusDoSScore - ] ++ - [ " Merkles : " ++ show peerStatusHaveMerkles | verbose ] ++ - [ " Messages : " ++ show peerStatusHaveMessage | verbose ] ++ - [ " Nonces : " ++ show peerStatusPingNonces | verbose ] ++ - [ " Reconnect: " ++ show t - | t <- maybeToList peerStatusReconnectTimer, verbose - ] ++ - [ " Logs : " | verbose ] ++ - [ " - " ++ msg | msg <- fromMaybe [] peerStatusLog, verbose] - -printBlockInfo :: BlockInfo -> [String] -printBlockInfo BlockInfo{..} = - [ "Block Height : " ++ show blockInfoHeight - 
, "Block Hash : " ++ cs (blockHashToHex blockInfoHash) - , "Block Timestamp : " ++ formatUTCTime blockInfoTimestamp - , "Previous Block : " ++ cs (blockHashToHex blockInfoPrevBlock) - , "Merkle Root : " ++ cs blockInfoMerkleRoot - , "Block Version : " ++ "0x" ++ cs (encodeHex versionData) - , "Block Difficulty : " ++ show (blockDiff blockInfoBits) - , "Chain Work : " ++ show blockInfoChainWork - ] - where - blockDiff :: Word32 -> Double - blockDiff target = getTarget (blockBits genesisHeader) / getTarget target - getTarget = fromIntegral . fst . decodeCompact - versionData = integerToBS (fromIntegral blockInfoVersion) - formatUTCTime = Time.formatTime Time.defaultTimeLocale - "%Y-%m-%d %H:%M:%S (UTC)" diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Client/PrettyJson.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Client/PrettyJson.hs deleted file mode 100644 index 42673390..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Client/PrettyJson.hs +++ /dev/null @@ -1,23 +0,0 @@ -{-# LANGUAGE CPP #-} -module Network.Haskoin.Wallet.Client.PrettyJson -( encodePretty ) -where - -import qualified Data.ByteString.Lazy as BL -import Data.Aeson (ToJSON) -import Data.Aeson.Encode.Pretty as Export (Config (..), - defConfig, - encodePretty') - --- aeson-pretty 0.8.0 introduces a new way to specify indentation -#if MIN_VERSION_aeson_pretty(0,8,0) -import Data.Aeson.Encode.Pretty as Export (Indent(..)) -jsonIndent :: Indent -jsonIndent = Spaces 2 -#else -jsonIndent :: Int -jsonIndent = 2 -#endif - -encodePretty :: ToJSON a => a -> BL.ByteString -encodePretty = encodePretty' defConfig{ confIndent = jsonIndent } diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Database.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Database.hs deleted file mode 100644 index 6670bef9..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Database.hs +++ /dev/null @@ -1,17 +0,0 @@ -module Network.Haskoin.Wallet.Database where - -import Control.Monad.Logger (MonadLoggerIO) -import Control.Monad.Trans.Control (MonadBaseControl) - -import Database.Persist.Sql (ConnectionPool) -import Database.Persist.Sqlite (SqliteConf(..), createSqlitePool) - -type DatabaseConfType = SqliteConf - -getDatabasePool :: (MonadLoggerIO m, MonadBaseControl IO m) - => DatabaseConfType -> m ConnectionPool -getDatabasePool conf = createSqlitePool (sqlDatabase conf) (sqlPoolSize conf) - -paramLimit :: Int -paramLimit = 20 - diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Internals.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Internals.hs deleted file mode 100644 index 9706e4d9..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Internals.hs +++ /dev/null @@ -1,31 +0,0 @@ -{-| -This module expose haskoin-wallet internals. No guarantee is made on the -stability of the interface of these internal modules. 
--} - -module Network.Haskoin.Wallet.Internals -( module Network.Haskoin.Wallet -, module Network.Haskoin.Wallet.Accounts -, module Network.Haskoin.Wallet.Transaction -, module Network.Haskoin.Wallet.Client -, module Network.Haskoin.Wallet.Client.Commands -, module Network.Haskoin.Wallet.Server -, module Network.Haskoin.Wallet.Server.Handler -, module Network.Haskoin.Wallet.Settings -, module Network.Haskoin.Wallet.Database -, module Network.Haskoin.Wallet.Types -, module Network.Haskoin.Wallet.Model -) where - -import Network.Haskoin.Wallet -import Network.Haskoin.Wallet.Accounts -import Network.Haskoin.Wallet.Transaction -import Network.Haskoin.Wallet.Client -import Network.Haskoin.Wallet.Client.Commands -import Network.Haskoin.Wallet.Server -import Network.Haskoin.Wallet.Server.Handler -import Network.Haskoin.Wallet.Settings -import Network.Haskoin.Wallet.Database -import Network.Haskoin.Wallet.Types -import Network.Haskoin.Wallet.Model - diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Model.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Model.hs deleted file mode 100644 index fe3fa784..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Model.hs +++ /dev/null @@ -1,132 +0,0 @@ -module Network.Haskoin.Wallet.Model -( -- Database types - Account(..) -, AccountId -, WalletAddr(..) -, WalletAddrId -, WalletState(..) -, WalletStateId -, WalletCoin(..) -, WalletCoinId -, SpentCoin(..) -, SpentCoinId -, WalletTx(..) -, WalletTxId -, EntityField(..) -, Unique(..) -, migrateWallet - --- JSON conversion -, toJsonAccount -, toJsonAddr -, toJsonCoin -, toJsonTx - -) where - -import Data.Word (Word32, Word64) -import Data.Time (UTCTime) -import Data.Text (Text) -import Data.String.Conversions (cs) - -import Database.Persist (EntityField, Unique) -import Database.Persist.Quasi (lowerCaseSettings) -import Database.Persist.TH - ( share - , mkPersist - , sqlSettings - , mkMigrate - , persistFileWith - ) - -import Network.Haskoin.Wallet.Types -import Network.Haskoin.Block -import Network.Haskoin.Transaction -import Network.Haskoin.Script -import Network.Haskoin.Crypto -import Network.Haskoin.Network - -share [ mkPersist sqlSettings - , mkMigrate "migrateWallet" - ] - $(persistFileWith lowerCaseSettings "config/models") - -{- JSON Types -} - -toJsonAccount :: Maybe Mnemonic -> Account -> JsonAccount -toJsonAccount msM acc = JsonAccount - { jsonAccountName = accountName acc - , jsonAccountType = accountType acc - , jsonAccountMnemonic = fmap cs msM - , jsonAccountMaster = accountMaster acc - , jsonAccountKeys = accountKeys acc - , jsonAccountGap = accountGap acc - , jsonAccountCreated = accountCreated acc - } - -toJsonAddr :: WalletAddr - -> Maybe BalanceInfo - -> JsonAddr -toJsonAddr addr balM = JsonAddr - { jsonAddrAddress = walletAddrAddress addr - , jsonAddrIndex = walletAddrIndex addr - , jsonAddrType = walletAddrType addr - , jsonAddrLabel = walletAddrLabel addr - , jsonAddrRedeem = walletAddrRedeem addr - , jsonAddrKey = walletAddrKey addr - , jsonAddrCreated = walletAddrCreated addr - , jsonAddrBalance = balM - } - -toJsonTx :: AccountName - -> Maybe (BlockHash, BlockHeight) -- ^ Current best block - -> WalletTx - -> JsonTx -toJsonTx acc bbM tx = JsonTx - { jsonTxHash = walletTxHash tx - , jsonTxNosigHash = walletTxNosigHash tx - , jsonTxType = walletTxType tx - , jsonTxInValue = walletTxInValue tx - , jsonTxOutValue = walletTxOutValue tx - , jsonTxValue = fromIntegral (walletTxInValue tx) - - fromIntegral (walletTxOutValue tx) - , jsonTxInputs = walletTxInputs tx - , jsonTxOutputs = 
walletTxOutputs tx - , jsonTxChange = walletTxChange tx - , jsonTxTx = walletTxTx tx - , jsonTxIsCoinbase = walletTxIsCoinbase tx - , jsonTxConfidence = walletTxConfidence tx - , jsonTxConfirmedBy = walletTxConfirmedBy tx - , jsonTxConfirmedHeight = walletTxConfirmedHeight tx - , jsonTxConfirmedDate = walletTxConfirmedDate tx - , jsonTxCreated = walletTxCreated tx - , jsonTxAccount = acc - , jsonTxConfirmations = f =<< walletTxConfirmedHeight tx - , jsonTxBestBlock = fst <$> bbM - , jsonTxBestBlockHeight = snd <$> bbM - } - where - f confirmedHeight = case bbM of - Just (_, h) -> return $ fromInteger $ - max 0 $ toInteger h - toInteger confirmedHeight + 1 - _ -> Nothing - -toJsonCoin :: WalletCoin - -> Maybe JsonTx -- ^ Coin’s transaction - -> Maybe JsonAddr -- ^ Coin’s address - -> Maybe JsonTx -- ^ Coin’s spending transaction - -> JsonCoin -toJsonCoin coin txM addrM spendM = JsonCoin - { jsonCoinHash = walletCoinHash coin - , jsonCoinPos = walletCoinPos coin - , jsonCoinValue = walletCoinValue coin - , jsonCoinScript = walletCoinScript coin - , jsonCoinCreated = walletCoinCreated coin - -- Optional tx - , jsonCoinTx = txM - -- Optional address - , jsonCoinAddress = addrM - -- Optional spending tx - , jsonCoinSpendingTx = spendM - } - diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Server.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Server.hs deleted file mode 100644 index 4210fa1f..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Server.hs +++ /dev/null @@ -1,502 +0,0 @@ -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE TemplateHaskell #-} -module Network.Haskoin.Wallet.Server -( runSPVServer -, runSPVServerWithContext -) where - -import Control.Concurrent.Async.Lifted (async, link, - waitAnyCancel) -import Control.Concurrent.STM (atomically, retry) -import Control.Concurrent.STM.TBMChan (TBMChan, newTBMChan, - readTBMChan) -import Control.DeepSeq (NFData (..)) -import Control.Exception.Lifted (ErrorCall (..), - SomeException (..), - catches) -import qualified Control.Exception.Lifted as E (Handler (..)) -import Control.Monad (forM_, forever, unless, - void, when) -import Control.Monad.Base (MonadBase) -import Control.Monad.Catch (MonadThrow) -import Control.Monad.Fix (fix) -import Control.Monad.Logger (MonadLoggerIO, - filterLogger, logDebug, - logError, logInfo, - logWarn, - runStdoutLoggingT) -import Control.Monad.Trans (lift, liftIO) -import Control.Monad.Trans.Control (MonadBaseControl, - liftBaseOpDiscard) -import Control.Monad.Trans.Resource (MonadResource, - runResourceT) -import Data.Aeson (Value, decode, encode) -import Data.ByteString (ByteString) -import qualified Data.ByteString.Lazy as BL (fromStrict, - toStrict) -import Data.Conduit (await, awaitForever, - ($$)) -import qualified Data.HashMap.Strict as H (lookup) -import Data.List.NonEmpty (NonEmpty ((:|))) -import qualified Data.Map.Strict as M (Map, assocs, elems, - empty, - fromListWith, null, - unionWith) -import Data.Maybe (fromJust, fromMaybe, - isJust) -import Data.Monoid ((<>)) -import Data.String.Conversions (cs) -import Data.Text (pack) -import Data.Word (Word32) -import Database.Esqueleto (from, val, where_, - (&&.), (<=.), (==.), - (^.)) -import Database.Persist.Sql (ConnectionPool, - runMigration) -import Network.Haskoin.Block -import Network.Haskoin.Constants -import Network.Haskoin.Node.BlockChain -import Network.Haskoin.Node.HeaderTree -import Network.Haskoin.Node.Peer -import Network.Haskoin.Node.STM -import Network.Haskoin.Transaction -import Network.Haskoin.Wallet.Accounts -import 
Network.Haskoin.Wallet.Database -import Network.Haskoin.Wallet.Model -import Network.Haskoin.Wallet.Server.Handler -import Network.Haskoin.Wallet.Settings -import Network.Haskoin.Wallet.Transaction -import Network.Haskoin.Wallet.Types -import System.Posix.Daemon (Redirection (ToFile), - runDetached) -import System.ZMQ4 (Context, KeyFormat (..), - Pub (..), Rep (..), - Socket, bind, receive, - receiveMulti, restrict, - send, sendMulti, - setCurveSecretKey, - setCurveServer, - setLinger, withContext, - withSocket, z85Decode) - -data EventSession = EventSession - { eventBatchSize :: !Int - , eventNewAddrs :: !(M.Map AccountId Word32) - } - deriving (Eq, Show, Read) - -instance NFData EventSession where - rnf EventSession{..} = - rnf eventBatchSize `seq` - rnf (M.elems eventNewAddrs) - -runSPVServer :: Config -> IO () -runSPVServer cfg = maybeDetach cfg $ -- start the server process - withContext (run . runSPVServerWithContext cfg) - where - -- Setup logging monads - run = runResourceT . runLogging - runLogging = runStdoutLoggingT . filterLogger logFilter - logFilter _ level = level >= configLogLevel cfg - --- |Run the server, and use the specifed ZeroMQ context. --- Useful if you want to communicate with the server using --- the "inproc" ZeroMQ transport, where a shared context is --- required. -runSPVServerWithContext :: ( MonadLoggerIO m - , MonadBaseControl IO m - , MonadBase IO m - , MonadThrow m - , MonadResource m - ) - => Config -> Context -> m () -runSPVServerWithContext cfg ctx = do - -- Initialize the database - -- Check the operation mode of the server. - pool <- initDatabase cfg - -- Notification channel - notif <- liftIO $ atomically $ newTBMChan 1000 - case configMode cfg of - -- In this mode, we do not launch an SPV node. We only accept - -- client requests through the ZMQ API. - SPVOffline -> do - let session = HandlerSession cfg pool Nothing notif - as <- mapM async - -- Run the ZMQ API-command server - [ runWalletCmd ctx session - -- Run the ZMQ notification thread - , runWalletNotif ctx session - ] - mapM_ link as - (_,r) <- waitAnyCancel as - return r - -- In this mode, we launch the client ZMQ API and we sync the - -- wallet database with an SPV node. - SPVOnline -> do - -- Initialize the node state - node <- getNodeState (Right pool) - -- Spin up the node threads - let session = HandlerSession cfg pool (Just node) notif - as <- mapM async - -- Start the SPV node - [ runNodeT (spv pool) node - -- Merkle block synchronization - , runNodeT (runMerkleSync pool notif) node - -- Import solo transactions as they arrive from peers - , runNodeT (txSource $$ processTx pool notif) node - -- Respond to transaction GetData requests - , runNodeT (handleGetData $ (`runDBPool` pool) . 
getTx) node - -- Re-broadcast pending transactions - , runNodeT (broadcastPendingTxs pool) node - -- Run the ZMQ API-command server - , runWalletCmd ctx session - -- Run the ZMQ notification thread - , runWalletNotif ctx session - ] - mapM_ link as - (_,r) <- waitAnyCancel as - $(logDebug) "Exiting main thread" - return r - where - spv pool = do - -- Get our bloom filter - (bloom, elems, _) <- runDBPool getBloomFilter pool - startSPVNode hosts bloom elems - -- Bitcoin nodes to connect to - nodes = fromMaybe - (error $ "BTC nodes for " ++ networkName ++ " not found") - (pack networkName `H.lookup` configBTCNodes cfg) - hosts = map (\x -> PeerHost (btcNodeHost x) (btcNodePort x)) nodes - -- Run the merkle syncing thread - runMerkleSync pool notif = do - $(logDebug) "Waiting for a valid bloom filter for merkle downloads..." - - -- Only download merkles if we have a valid bloom filter - _ <- atomicallyNodeT waitBloomFilter - - -- Provide a fast catchup time if we are at height 0 - fcM <- fmap (fmap adjustFCTime) $ (`runDBPool` pool) $ do - (_, h) <- walletBestBlock - if h == 0 then firstAddrTime else return Nothing - maybe (return ()) (atomicallyNodeT . rescanTs) fcM - - -- Start the merkle sync - merkleSync pool 500 notif - $(logDebug) "Exiting Merkle-sync thread" - -- Run a thread that will re-broadcast pending transactions - broadcastPendingTxs pool = forever $ do - (hash, _) <- runSqlNodeT $ walletBestBlock - -- Wait until we are synced - atomicallyNodeT $ do - synced <- areBlocksSynced hash - unless synced $ lift retry - -- Send an INV for those transactions to all peers - broadcastTxs =<< runDBPool (getPendingTxs 0) pool - -- Wait until we are not synced - atomicallyNodeT $ do - synced <- areBlocksSynced hash - when synced $ lift retry - $(logDebug) "Exiting tx-broadcast thread" - processTx pool notif = do - awaitForever $ \tx -> lift $ do - (_, newAddrs) <- runDBPool (importNetTx tx (Just notif)) pool - unless (null newAddrs) $ do - $(logInfo) $ pack $ unwords - [ "Generated", show $ length newAddrs - , "new addresses while importing the tx." - , "Updating the bloom filter" - ] - (bloom, elems, _) <- runDBPool getBloomFilter pool - atomicallyNodeT $ sendBloomFilter bloom elems - $(logDebug) "Exiting tx-import thread" - -initDatabase :: (MonadBaseControl IO m, MonadLoggerIO m) - => Config -> m ConnectionPool -initDatabase cfg = do - -- Create a database pool - let dbCfg = fromMaybe - (error $ "DB config settings for " ++ networkName ++ " not found") - (pack networkName `H.lookup` configDatabase cfg) - pool <- getDatabasePool dbCfg - -- Initialize wallet database - flip runDBPool pool $ do - _ <- runMigration migrateWallet - _ <- runMigration migrateHeaderTree - initWallet $ configBloomFP cfg - return pool - -merkleSync - :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m, MonadResource m) - => ConnectionPool - -> Word32 - -> TBMChan Notif - -> NodeT m () -merkleSync pool bSize notif = do - -- Get our best block - (hash, _) <- runDBPool walletBestBlock pool - $(logDebug) "Starting merkle batch download" - -- Wait for a new block or a rescan - (action, source) <- merkleDownload hash bSize - $(logDebug) "Received a merkle action and source. Processing the source..." 
- - -- Read and process the data from the source - (lastMerkleM, mTxsAcc, aMap) <- source $$ go Nothing [] M.empty - $(logDebug) "Merkle source processed and closed" - - -- Send a new bloom filter to our peers if new addresses were generated - unless (M.null aMap) $ do - $(logInfo) $ pack $ unwords - [ "Generated", show $ sum $ M.elems aMap - , "new addresses while importing the merkle block." - , "Sending our bloom filter." - ] - (bloom, elems, _) <- runDBPool getBloomFilter pool - atomicallyNodeT $ sendBloomFilter bloom elems - - -- Check if we should rescan the current merkle batch - $(logDebug) "Checking if we need to rescan the current batch..." - rescan <- shouldRescan aMap - when rescan $ $(logDebug) "We need to rescan the current batch" - -- Compute the new batch size - let newBSize | rescan = max 1 $ bSize `div` 2 - | otherwise = min 500 $ bSize + max 1 (bSize `div` 20) - - when (newBSize /= bSize) $ $(logDebug) $ pack $ unwords - [ "Changing block batch size from", show bSize, "to", show newBSize ] - - -- Did we receive all the merkles that we asked for ? - let missing = (headerHash <$> lastMerkleM) /= - Just (nodeHash $ last $ actionNodes action) - - when missing $ $(logWarn) $ pack $ unwords - [ "Merkle block stream closed prematurely" - , show lastMerkleM - ] - - -- TODO: We could still salvage a partially received batch - unless (rescan || missing) $ do - $(logDebug) "Importing merkles into the wallet..." - -- Confirm the transactions - runDBPool (importMerkles action mTxsAcc (Just notif)) pool - $(logDebug) "Done importing merkles into the wallet" - logBlockChainAction action - - merkleSync pool newBSize notif - where - go lastMerkleM mTxsAcc aMap = await >>= \resM -> case resM of - Just (Right tx) -> do - $(logDebug) $ pack $ unwords - [ "Importing merkle tx", cs $ txHashToHex $ txHash tx ] - (_, newAddrs) <- lift $ runDBPool (importNetTx tx Nothing) pool - $(logDebug) $ pack $ unwords - [ "Generated", show $ length newAddrs - , "new addresses while importing tx" - , cs $ txHashToHex $ txHash tx - ] - let newMap = M.unionWith (+) aMap $ groupByAcc newAddrs - go lastMerkleM mTxsAcc newMap - Just (Left (MerkleBlock mHead _ _ _, mTxs)) -> do - $(logDebug) $ pack $ unwords - [ "Buffering merkle block" - , cs $ blockHashToHex $ headerHash mHead - ] - go (Just mHead) (mTxs:mTxsAcc) aMap - -- Done processing this batch. Reverse mTxsAcc as we have been - -- prepending new values to it. - _ -> return (lastMerkleM, reverse mTxsAcc, aMap) - groupByAcc addrs = - let xs = map (\a -> (walletAddrAccount a, 1)) addrs - in M.fromListWith (+) xs - shouldRescan aMap = do - -- Try to find an account whos gap is smaller than the number of new - -- addresses generated in that account. - res <- (`runDBPool` pool) $ splitSelect (M.assocs aMap) $ \ks -> - from $ \a -> do - let andCond (ai, cnt) = - a ^. AccountId ==. val ai &&. - a ^. AccountGap <=. val cnt - where_ $ join2 $ map andCond ks - return $ a ^. AccountId - return $ not $ null res - -- Some logging of the blocks - logBlockChainAction action = case action of - BestChain nodes -> $(logInfo) $ pack $ unwords - [ "Best chain height" - , show $ nodeBlockHeight $ last nodes - , "(", cs $ blockHashToHex $ nodeHash $ last nodes - , ")" - ] - ChainReorg _ o n -> $(logInfo) $ pack $ unlines $ - [ "Chain reorg." - , "Orphaned blocks:" - ] - ++ map ((" " ++) . cs . blockHashToHex . nodeHash) o - ++ [ "New blocks:" ] - ++ map ((" " ++) . cs . blockHashToHex . 
nodeHash) n - ++ [ unwords [ "Best merkle chain height" - , show $ nodeBlockHeight $ last n - ] - ] - SideChain n -> $(logWarn) $ pack $ unlines $ - "Side chain:" : - map ((" " ++) . cs . blockHashToHex . nodeHash) n - KnownChain n -> $(logWarn) $ pack $ unlines $ - "Known chain:" : - map ((" " ++) . cs . blockHashToHex . nodeHash) n - -maybeDetach :: Config -> IO () -> IO () -maybeDetach cfg action = - if configDetach cfg then runDetached pidFile logFile action >> logStarted else action - where - logStarted = putStrLn "Process started" - pidFile = Just $ configPidFile cfg - logFile = ToFile $ configLogFile cfg - --- Run the main ZeroMQ loop --- TODO: Support concurrent requests using DEALER socket when we can do --- concurrent MySQL requests. -runWalletNotif :: ( MonadLoggerIO m - , MonadBaseControl IO m - , MonadBase IO m - , MonadThrow m - , MonadResource m - ) - => Context -> HandlerSession -> m () -runWalletNotif ctx session = - liftBaseOpDiscard (withSocket ctx Pub) $ \sock -> do - liftIO $ setLinger (restrict (0 :: Int)) sock - setupCrypto ctx sock session - liftIO $ bind sock $ configBindNotif $ handlerConfig session - forever $ do - xM <- liftIO $ atomically $ readTBMChan $ handlerNotifChan session - forM_ xM $ \x -> - let (typ, pay) = case x of - NotifBlock _ -> - ("[block]", cs $ encode x) - NotifTx JsonTx{..} -> - ("{" <> cs jsonTxAccount <> "}", cs $ encode x) - in liftIO $ sendMulti sock $ typ :| [pay] - -runWalletCmd :: ( MonadLoggerIO m - , MonadBaseControl IO m - , MonadBase IO m - , MonadThrow m - , MonadResource m - ) - => Context -> HandlerSession -> m () -runWalletCmd ctx session = do - liftBaseOpDiscard (withSocket ctx Rep) $ \sock -> do - liftIO $ setLinger (restrict (0 :: Int)) sock - setupCrypto ctx sock session - liftIO $ bind sock $ configBind $ handlerConfig session - fix $ \loop -> do - bs <- liftIO $ receive sock - let msg = decode $ BL.fromStrict bs - res <- case msg of - Just r -> catchErrors $ - runHandler (dispatchRequest r) session - Nothing -> return $ ResponseError "Could not decode request" - liftIO $ send sock [] $ BL.toStrict $ encode res - unless (msg == Just StopServerReq) loop - $(logInfo) "Exiting ZMQ command thread..." 
- where - catchErrors m = catches m - [ E.Handler $ \(WalletException err) -> do - $(logError) $ pack err - return $ ResponseError $ pack err - , E.Handler $ \(ErrorCall err) -> do - $(logError) $ pack err - return $ ResponseError $ pack err - , E.Handler $ \(SomeException exc) -> do - $(logError) $ pack $ show exc - return $ ResponseError $ pack $ show exc - ] - -setupCrypto :: (MonadLoggerIO m, MonadBaseControl IO m) - => Context -> Socket a -> HandlerSession -> m () -setupCrypto ctx' sock session = do - when (isJust serverKeyM) $ liftIO $ do - let k = fromJust $ configServerKey $ handlerConfig session - setCurveServer True sock - setCurveSecretKey TextFormat k sock - when (isJust clientKeyPubM) $ do - k <- z85Decode (fromJust clientKeyPubM) - void $ async $ runZapAuth ctx' k - where - cfg = handlerConfig session - serverKeyM = configServerKey cfg - clientKeyPubM = configClientKeyPub cfg - -runZapAuth :: ( MonadLoggerIO m - , MonadBaseControl IO m - , MonadBase IO m - ) - => Context -> ByteString -> m () -runZapAuth ctx k = do - $(logDebug) $ "Starting ØMQ authentication thread" - liftBaseOpDiscard (withSocket ctx Rep) $ \zap -> do - liftIO $ setLinger (restrict (0 :: Int)) zap - liftIO $ bind zap "inproc://zeromq.zap.01" - forever $ do - buffer <- liftIO $ receiveMulti zap - let actionE = - case buffer of - v:q:_:_:_:m:p:_ -> do - when (v /= "1.0") $ - Left (q, "500", "Version number not valid") - when (m /= "CURVE") $ - Left (q, "400", "Mechanism not supported") - when (p /= k) $ - Left (q, "400", "Invalid client public key") - return q - _ -> Left ("", "500", "Malformed request") - case actionE of - Right q -> do - $(logInfo) "Authenticated client successfully" - liftIO $ sendMulti zap $ - "1.0" :| [q, "200", "OK", "client", ""] - Left (q, c, m) -> do - $(logError) $ pack $ unwords - [ "Failed to authenticate client:" , cs c, cs m ] - liftIO $ sendMulti zap $ - "1.0" :| [q, c, m, "", ""] - - -dispatchRequest :: ( MonadLoggerIO m - , MonadBaseControl IO m - , MonadBase IO m - , MonadThrow m - , MonadResource m - ) - => WalletRequest -> Handler m (WalletResponse Value) -dispatchRequest req = fmap ResponseValid $ case req of - AccountReq n -> accountReq n - AccountsReq p -> accountsReq p - NewAccountReq na -> newAccountReq na - RenameAccountReq n n' -> renameAccountReq n n' - AddPubKeysReq n ks -> addPubKeysReq n ks - SetAccountGapReq n g -> setAccountGapReq n g - AddrsReq n t m o p -> addrsReq n t m o p - UnusedAddrsReq n t p -> unusedAddrsReq n t p - AddressReq n i t m o -> addressReq n i t m o - PubKeyIndexReq n k t -> pubKeyIndexReq n k t - SetAddrLabelReq n i t l -> setAddrLabelReq n i t l - GenerateAddrsReq n i t -> generateAddrsReq n i t - TxsReq n p -> txsReq n p - PendingTxsReq a p -> pendingTxsReq a p - DeadTxsReq a p -> deadTxsReq a p - AddrTxsReq n i t p -> addrTxsReq n i t p - CreateTxReq n c -> createTxReq n c - ImportTxReq n t -> importTxReq n t - SignTxReq n s -> signTxReq n s - TxReq n h -> txReq n h - DeleteTxReq t -> deleteTxReq t - OfflineTxReq n h -> offlineTxReq n h - SignOfflineTxReq n k t c -> signOfflineTxReq n k t c - BalanceReq n mc o -> balanceReq n mc o - NodeActionReq na -> nodeActionReq na - SyncReq a n b -> syncReq a (Right n) b - SyncHeightReq a n b -> syncReq a (Left n) b - BlockInfoReq l -> blockInfoReq l - StopServerReq -> stopServerReq - diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Server/Handler.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Server/Handler.hs deleted file mode 100644 index 5e18f1e2..00000000 --- 
a/haskoin-wallet/src/Network/Haskoin/Wallet/Server/Handler.hs +++ /dev/null @@ -1,647 +0,0 @@ -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE TemplateHaskell #-} -module Network.Haskoin.Wallet.Server.Handler where - -import Control.Arrow (first) -import Control.Concurrent.STM.TBMChan (TBMChan) -import Control.Exception (SomeException (..), - tryJust) -import Control.Monad (liftM, forM, unless, when) -import Control.Monad.Base (MonadBase) -import Control.Monad.Catch (MonadThrow, throwM) -import Control.Monad.Logger (MonadLoggerIO, logError, - logInfo) -import Control.Monad.Reader (ReaderT, asks, runReaderT) -import Control.Monad.Trans (MonadIO, lift, liftIO) -import Control.Monad.Trans.Control (MonadBaseControl) -import Control.Monad.Trans.Resource (MonadResource) -import Data.Aeson (Value (..), toJSON) -import qualified Data.Map.Strict as M (elems, fromList, - intersectionWith) -import Data.String.Conversions (cs) -import Data.Text (Text, pack, unpack) -import Data.Word (Word32) -import Data.Maybe (catMaybes) -import Database.Esqueleto (Entity (..), SqlPersistT) -import Database.Persist.Sql (ConnectionPool, - SqlPersistM, - runSqlPersistMPool, - runSqlPool) -import Network.Haskoin.Block -import Network.Haskoin.Crypto -import Network.Haskoin.Node.BlockChain -import Network.Haskoin.Node.HeaderTree -import Network.Haskoin.Node.Peer -import Network.Haskoin.Node.STM -import Network.Haskoin.Transaction -import Network.Haskoin.Wallet.Accounts -import Network.Haskoin.Wallet.Block -import Network.Haskoin.Wallet.Model -import Network.Haskoin.Wallet.Settings -import Network.Haskoin.Wallet.Transaction -import Network.Haskoin.Wallet.Types -import Network.Haskoin.Wallet.Types.BlockInfo (fromNodeBlock) - -type Handler m = ReaderT HandlerSession m - -data HandlerSession = HandlerSession - { handlerConfig :: !Config - , handlerPool :: !ConnectionPool - , handlerNodeState :: !(Maybe SharedNodeState) - , handlerNotifChan :: !(TBMChan Notif) - } - -runHandler :: Monad m => Handler m a -> HandlerSession -> m a -runHandler = runReaderT - -runDB :: MonadBaseControl IO m => SqlPersistT m a -> Handler m a -runDB action = asks handlerPool >>= lift . 
runDBPool action - -runDBPool :: MonadBaseControl IO m => SqlPersistT m a -> ConnectionPool -> m a -runDBPool = runSqlPool - -tryDBPool :: MonadLoggerIO m => ConnectionPool -> SqlPersistM a -> m (Maybe a) -tryDBPool pool action = do - resE <- liftIO $ tryJust f $ runSqlPersistMPool action pool - case resE of - Right res -> return $ Just res - Left err -> do - $(logError) $ pack $ unwords [ "A database error occured:", err] - return Nothing - where - f (SomeException e) = Just $ show e - -runNode :: MonadIO m => NodeT m a -> Handler m a -runNode action = do - nodeStateM <- asks handlerNodeState - case nodeStateM of - Just nodeState -> lift $ runNodeT action nodeState - _ -> error "runNode: No node state available" - -{- Server Handlers -} - -accountReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName -> Handler m (Maybe Value) -accountReq name = do - $(logInfo) $ format $ unlines - [ "Account" - , " Account name: " ++ unpack name - ] - Entity _ acc <- runDB $ getAccount name - return $ Just $ toJSON $ toJsonAccount Nothing acc - -accountsReq :: ( MonadLoggerIO m - , MonadBaseControl IO m - , MonadBase IO m - , MonadThrow m - , MonadResource m - ) - => ListRequest - -> Handler m (Maybe Value) -accountsReq lq@ListRequest{..} = do - $(logInfo) $ format $ unlines - [ "Accounts" - , " Offset : " ++ show listOffset - , " Limit : " ++ show listLimit - , " Reversed : " ++ show listReverse - ] - (accs, cnt) <- runDB $ accounts lq - return $ Just $ toJSON $ ListResult (map (toJsonAccount Nothing) accs) cnt - -newAccountReq - :: (MonadResource m, MonadThrow m, MonadLoggerIO m, MonadBaseControl IO m) - => NewAccount -> Handler m (Maybe Value) -newAccountReq newAcc@NewAccount{..} = do - $(logInfo) $ format $ unlines - [ "NewAccount" - , " Account name: " ++ unpack newAccountName - , " Account type: " ++ show newAccountType - ] - (Entity _ newAcc', mnemonicM) <- runDB $ newAccount newAcc - -- Update the bloom filter if the account is complete - whenOnline $ when (isCompleteAccount newAcc') updateNodeFilter - return $ Just $ toJSON $ toJsonAccount mnemonicM newAcc' - -renameAccountReq - :: (MonadResource m, MonadThrow m, MonadLoggerIO m, MonadBaseControl IO m) - => AccountName -> AccountName -> Handler m (Maybe Value) -renameAccountReq oldName newName = do - $(logInfo) $ format $ unlines - [ "RenameAccount" - , " Account name: " ++ unpack oldName - , " New name : " ++ unpack newName - ] - newAcc <- runDB $ do - accE <- getAccount oldName - renameAccount accE newName - return $ Just $ toJSON $ toJsonAccount Nothing newAcc - -addPubKeysReq - :: (MonadResource m, MonadThrow m, MonadLoggerIO m, MonadBaseControl IO m) - => AccountName -> [XPubKey] -> Handler m (Maybe Value) -addPubKeysReq name keys = do - $(logInfo) $ format $ unlines - [ "AddPubKeys" - , " Account name: " ++ unpack name - , " Key count : " ++ show (length keys) - ] - newAcc <- runDB $ do - accE <- getAccount name - addAccountKeys accE keys - -- Update the bloom filter if the account is complete - whenOnline $ when (isCompleteAccount newAcc) updateNodeFilter - return $ Just $ toJSON $ toJsonAccount Nothing newAcc - -setAccountGapReq :: ( MonadLoggerIO m - , MonadBaseControl IO m - , MonadBase IO m - , MonadThrow m - , MonadResource m - ) - => AccountName -> Word32 - -> Handler m (Maybe Value) -setAccountGapReq name gap = do - $(logInfo) $ format $ unlines - [ "SetAccountGap" - , " Account name: " ++ unpack name - , " New gap size: " ++ show gap - ] - -- Update the gap - Entity _ newAcc <- runDB $ do - accE <- 
getAccount name - setAccountGap accE gap - -- Update the bloom filter - whenOnline updateNodeFilter - return $ Just $ toJSON $ toJsonAccount Nothing newAcc - -addrsReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName - -> AddressType - -> Word32 - -> Bool - -> ListRequest - -> Handler m (Maybe Value) -addrsReq name addrType minConf offline listReq = do - $(logInfo) $ format $ unlines - [ "Addresses" - , " Account name: " ++ unpack name - , " Address type: " ++ show addrType - , " Start index : " ++ show (listOffset listReq) - , " Reversed : " ++ show (listReverse listReq) - , " MinConf : " ++ show minConf - , " Offline : " ++ show offline - ] - - (res, bals, cnt) <- runDB $ do - accE <- getAccount name - (res, cnt) <- addressList accE addrType listReq - case res of - [] -> return (res, [], cnt) - _ -> do - let is = map walletAddrIndex res - (iMin, iMax) = (minimum is, maximum is) - bals <- addressBalances accE iMin iMax addrType minConf offline - return (res, bals, cnt) - - -- Join addresses and balances together - let g (addr, bal) = toJsonAddr addr (Just bal) - addrBals = map g $ M.elems $ joinAddrs res bals - return $ Just $ toJSON $ ListResult addrBals cnt - where - joinAddrs addrs bals = - let f addr = (walletAddrIndex addr, addr) - in M.intersectionWith (,) (M.fromList $ map f addrs) (M.fromList bals) - -unusedAddrsReq - :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName -> AddressType -> ListRequest -> Handler m (Maybe Value) -unusedAddrsReq name addrType lq@ListRequest{..} = do - $(logInfo) $ format $ unlines - [ "UnusedAddrs" - , " Account name: " ++ unpack name - , " Address type: " ++ show addrType - , " Offset : " ++ show listOffset - , " Limit : " ++ show listLimit - , " Reversed : " ++ show listReverse - ] - - (addrs, cnt) <- runDB $ do - accE <- getAccount name - unusedAddresses accE addrType lq - - return $ Just $ toJSON $ ListResult (map (`toJsonAddr` Nothing) addrs) cnt - -addressReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName -> KeyIndex -> AddressType - -> Word32 -> Bool - -> Handler m (Maybe Value) -addressReq name i addrType minConf offline = do - $(logInfo) $ format $ unlines - [ "Address" - , " Account name: " ++ unpack name - , " Index : " ++ show i - , " Address type: " ++ show addrType - ] - - (addr, balM) <- runDB $ do - accE <- getAccount name - addrE <- getAddress accE addrType i - bals <- addressBalances accE i i addrType minConf offline - return $ case bals of - ((_,bal):_) -> (entityVal addrE, Just bal) - _ -> (entityVal addrE, Nothing) - return $ Just $ toJSON $ toJsonAddr addr balM - --- TODO: How can we generalize this? Perhaps as part of wallet searching? -pubKeyIndexReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName - -> PubKeyC - -> AddressType - -> Handler m (Maybe Value) -pubKeyIndexReq name key addrType = do - $(logInfo) $ format $ unlines - [ "PubKeyIndex" - , " Account name: " ++ unpack name - , " Key : " ++ show key - , " Address type: " ++ show addrType - ] - addrLst <- runDB $ do - accE <- getAccount name - lookupByPubKey accE key addrType - -- TODO: We don't return the balance for now. Maybe add it? Or not? 
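-- The joinAddrs helper in addrsReq above pairs each address with its balance
-- by key index using M.intersectionWith. The same pattern in isolation, with
-- plain Ints and Strings standing in for WalletAddr and the balance record
-- (a sketch, not wallet code):

import qualified Data.Map.Strict as M

joinByKey :: Ord k => [(k, a)] -> [(k, b)] -> [(a, b)]
joinByKey xs ys =
    M.elems $ M.intersectionWith (,) (M.fromList xs) (M.fromList ys)

-- joinByKey [(0, "addr0"), (1, "addr1")] [(1, 42 :: Int)] == [("addr1", 42)]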
- return $ Just $ toJSON $ map (`toJsonAddr` Nothing) addrLst - -setAddrLabelReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName - -> KeyIndex - -> AddressType - -> Text - -> Handler m (Maybe Value) -setAddrLabelReq name i addrType label = do - $(logInfo) $ format $ unlines - [ "SetAddrLabel" - , " Account name: " ++ unpack name - , " Index : " ++ show i - , " Label : " ++ unpack label - ] - - newAddr <- runDB $ do - accE <- getAccount name - setAddrLabel accE i addrType label - - return $ Just $ toJSON $ toJsonAddr newAddr Nothing - -generateAddrsReq :: ( MonadLoggerIO m - , MonadBaseControl IO m - , MonadThrow m - , MonadBase IO m - , MonadResource m - ) - => AccountName - -> KeyIndex - -> AddressType - -> Handler m (Maybe Value) -generateAddrsReq name i addrType = do - $(logInfo) $ format $ unlines - [ "GenerateAddrs" - , " Account name: " ++ unpack name - , " Index : " ++ show i - ] - - cnt <- runDB $ do - accE <- getAccount name - generateAddrs accE addrType i - - -- Update the bloom filter - whenOnline updateNodeFilter - - return $ Just $ toJSON cnt - --- This is a generic function (see specifics below) -getTxs :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName - -> ListRequest - -> String - -> (AccountId -> ListRequest -> SqlPersistT m ([WalletTx], Word32)) - -> Handler m (Maybe Value) -getTxs name lq@ListRequest{..} cmd f = do - $(logInfo) $ format $ unlines - [ cmd - , " Account name: " ++ unpack name - , " Offset : " ++ show listOffset - , " Limit : " ++ show listLimit - , " Reversed : " ++ show listReverse - ] - - (res, cnt, bb) <- runDB $ do - Entity ai _ <- getAccount name - bb <- walletBestBlock - (res, cnt) <- f ai lq - return (res, cnt, bb) - - return $ Just $ toJSON $ ListResult (map (g bb) res) cnt - where - g bb = toJsonTx name (Just bb) - -txsReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName -> ListRequest -> Handler m (Maybe Value) -txsReq name lq = getTxs name lq "Txs" (txs Nothing) - -pendingTxsReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName -> ListRequest -> Handler m (Maybe Value) -pendingTxsReq name lq = getTxs name lq "PendingTxs" (txs (Just TxPending)) - -deadTxsReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName -> ListRequest -> Handler m (Maybe Value) -deadTxsReq name lq = getTxs name lq "DeadTxs" (txs (Just TxDead)) - -addrTxsReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName -> KeyIndex -> AddressType -> ListRequest - -> Handler m (Maybe Value) -addrTxsReq name index addrType lq@ListRequest{..} = do - $(logInfo) $ format $ unlines - [ "AddrTxs" - , " Account name : " ++ unpack name - , " Address index: " ++ show index - , " Address type : " ++ show addrType - , " Offset : " ++ show listOffset - , " Limit : " ++ show listLimit - , " Reversed : " ++ show listReverse - ] - - (res, cnt, bb) <- runDB $ do - accE <- getAccount name - addrE <- getAddress accE addrType index - bb <- walletBestBlock - (res, cnt) <- addrTxs accE addrE lq - return (res, cnt, bb) - - return $ Just $ toJSON $ ListResult (map (f bb) res) cnt - where - f bb = toJsonTx name (Just bb) - -createTxReq :: ( MonadLoggerIO m, MonadBaseControl IO m, MonadBase IO m - , MonadThrow m, MonadResource m - ) - => AccountName - -> CreateTx - -> Handler m (Maybe Value) -createTxReq name (CreateTx rs fee minconf rcptFee sign masterM) = do - $(logInfo) $ format $ unlines - [ "CreateTx" - , " Account name: " ++ unpack name - , " Recipients : " 
++ show (map (first addrToBase58) rs) - , " Fee : " ++ show fee - , " Minconf : " ++ show minconf - , " Rcpt. Fee : " ++ show rcptFee - , " Sign : " ++ show sign - ] - - notif <- asks handlerNotifChan - - (bb, txRes, newAddrs) <- runDB $ do - accE <- getAccount name - bb <- walletBestBlock - (txRes, newAddrs) <- createWalletTx - accE (Just notif) masterM rs fee minconf rcptFee sign - return (bb, txRes, newAddrs) - - whenOnline $ do - -- Update the bloom filter - unless (null newAddrs) updateNodeFilter - -- If the transaction is pending, broadcast it to the network - when (walletTxConfidence txRes == TxPending) $ - runNode $ broadcastTxs [walletTxHash txRes] - return $ Just $ toJSON $ toJsonTx name (Just bb) txRes - -importTxReq :: ( MonadLoggerIO m, MonadBaseControl IO m, MonadBase IO m - , MonadThrow m, MonadResource m - ) - => AccountName -> Tx -> Handler m (Maybe Value) -importTxReq name tx = do - $(logInfo) $ format $ unlines - [ "ImportTx" - , " Account name: " ++ unpack name - , " TxId : " ++ cs (txHashToHex (txHash tx)) - ] - - notif <- asks handlerNotifChan - - (bb, txRes, newAddrs) <- runDB $ do - Entity ai _ <- getAccount name - bb <- walletBestBlock - (res, newAddrs) <- importTx tx (Just notif) ai - case filter ((== ai) . walletTxAccount) res of - (txRes:_) -> return (bb, txRes, newAddrs) - _ -> throwM $ WalletException "Could not import the transaction" - - whenOnline $ do - -- Update the bloom filter - unless (null newAddrs) updateNodeFilter - -- If the transaction is pending, broadcast it to the network - when (walletTxConfidence txRes == TxPending) $ - runNode $ broadcastTxs [walletTxHash txRes] - return $ Just $ toJSON $ toJsonTx name (Just bb) txRes - -signTxReq :: ( MonadLoggerIO m, MonadBaseControl IO m, MonadBase IO m - , MonadThrow m, MonadResource m - ) - => AccountName -> SignTx -> Handler m (Maybe Value) -signTxReq name (SignTx txid masterM) = do - $(logInfo) $ format $ unlines - [ "SignTx" - , " Account name: " ++ unpack name - , " TxId : " ++ cs (txHashToHex txid) - ] - - notif <- asks handlerNotifChan - - (bb, txRes, newAddrs) <- runDB $ do - accE@(Entity ai _) <- getAccount name - bb <- walletBestBlock - (res, newAddrs) <- signAccountTx accE (Just notif) masterM txid - case filter ((== ai) . 
walletTxAccount) res of - (txRes:_) -> return (bb, txRes, newAddrs) - _ -> throwM $ WalletException "Could not sign the transaction" - - whenOnline $ do - -- Update the bloom filter - unless (null newAddrs) updateNodeFilter - -- If the transaction is pending, broadcast it to the network - when (walletTxConfidence txRes == TxPending) $ - runNode $ broadcastTxs [walletTxHash txRes] - return $ Just $ toJSON $ toJsonTx name (Just bb) txRes - -txReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName -> TxHash -> Handler m (Maybe Value) -txReq name txid = do - $(logInfo) $ format $ unlines - [ "Tx" - , " Account name: " ++ unpack name - , " TxId : " ++ cs (txHashToHex txid) - ] - (res, bb) <- runDB $ do - Entity ai _ <- getAccount name - bb <- walletBestBlock - res <- getAccountTx ai txid - return (res, bb) - return $ Just $ toJSON $ toJsonTx name (Just bb) res - --- TODO: This should be limited to a single account -deleteTxReq :: (MonadLoggerIO m, MonadThrow m, MonadBaseControl IO m) - => TxHash -> Handler m (Maybe Value) -deleteTxReq txid = do - $(logInfo) $ format $ unlines - [ "DeleteTx" - , " TxId: " ++ cs (txHashToHex txid) - ] - runDB $ deleteTx txid - return Nothing - -offlineTxReq :: ( MonadLoggerIO m, MonadBaseControl IO m - , MonadBase IO m, MonadThrow m, MonadResource m - ) - => AccountName -> TxHash -> Handler m (Maybe Value) -offlineTxReq accountName txid = do - $(logInfo) $ format $ unlines - [ "OfflineTx" - , " Account name: " ++ unpack accountName - , " TxId : " ++ cs (txHashToHex txid) - ] - (dat, _) <- runDB $ do - Entity ai _ <- getAccount accountName - getOfflineTxData ai txid - return $ Just $ toJSON dat - -signOfflineTxReq :: ( MonadLoggerIO m, MonadBaseControl IO m - , MonadBase IO m, MonadThrow m, MonadResource m - ) - => AccountName - -> Maybe XPrvKey - -> Tx - -> [CoinSignData] - -> Handler m (Maybe Value) -signOfflineTxReq accountName masterM tx signData = do - $(logInfo) $ format $ unlines - [ "SignOfflineTx" - , " Account name: " ++ unpack accountName - , " TxId : " ++ cs (txHashToHex (txHash tx)) - ] - Entity _ acc <- runDB $ getAccount accountName - let signedTx = signOfflineTx acc masterM tx signData - complete = verifyStdTx signedTx $ map toDat signData - toDat CoinSignData{..} = (coinSignScriptOutput, coinSignOutPoint) - return $ Just $ toJSON $ TxCompleteRes signedTx complete - -balanceReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => AccountName -> Word32 -> Bool - -> Handler m (Maybe Value) -balanceReq name minconf offline = do - $(logInfo) $ format $ unlines - [ "Balance" - , " Account name: " ++ unpack name - , " Minconf : " ++ show minconf - , " Offline : " ++ show offline - ] - bal <- runDB $ do - Entity ai _ <- getAccount name - accountBalance ai minconf offline - return $ Just $ toJSON bal - -nodeActionReq :: (MonadLoggerIO m, MonadBaseControl IO m, MonadThrow m) - => NodeAction -> Handler m (Maybe Value) -nodeActionReq action = case action of - NodeActionRescan tM -> do - t <- case tM of - Just t -> return $ adjustFCTime t - Nothing -> do - timeM <- runDB firstAddrTime - maybe err (return . 
adjustFCTime) timeM - $(logInfo) $ format $ unlines - [ "Node Rescan" - , " Timestamp: " ++ show t - ] - whenOnline $ do - runDB resetRescan - runNode $ atomicallyNodeT $ rescanTs t - return $ Just $ toJSON $ RescanRes t - NodeActionStatus -> do - $(logInfo) $ format "Node Status" - status <- runNode $ atomicallyNodeT nodeStatus - return $ Just $ toJSON status - where - err = throwM $ WalletException - "No keys have been generated in the wallet" - -syncReq :: (MonadThrow m, MonadLoggerIO m, MonadBaseControl IO m) - => AccountName - -> Either BlockHeight BlockHash - -> ListRequest - -> Handler m (Maybe Value) -syncReq acc blockE lq@ListRequest{..} = runDB $ do - $(logInfo) $ format $ unlines - [ "Sync" - , " Account name: " ++ cs acc - , " Block : " ++ showBlock - , " Offset : " ++ show listOffset - , " Limit : " ++ show listLimit - , " Reversed : " ++ show listReverse - ] - ListResult nodes cnt <- mainChain blockE lq - case nodes of - [] -> return $ Just $ toJSON $ ListResult ([] :: [()]) cnt - b:_ -> do - Entity ai _ <- getAccount acc - ts <- accTxsFromBlock ai (nodeBlockHeight b) - (fromIntegral $ length nodes) - let bts = blockTxs nodes ts - return $ Just $ toJSON $ ListResult (map f bts) cnt - where - f (block, txs') = JsonSyncBlock - { jsonSyncBlockHash = nodeHash block - , jsonSyncBlockHeight = nodeBlockHeight block - , jsonSyncBlockPrev = nodePrev block - , jsonSyncBlockTxs = map (toJsonTx acc Nothing) txs' - } - showBlock = case blockE of - Left e -> show e - Right b -> cs $ blockHashToHex b - -blockInfoReq :: (MonadThrow m, MonadLoggerIO m, MonadBaseControl IO m) - => [BlockHash] -> Handler m (Maybe Value) -blockInfoReq headerLst = do - $(logInfo) $ format "Received BlockInfo request" - lstMaybeBlk <- forM headerLst (runNode . runSqlNodeT . getBlockByHash) - return $ toJSON <$> Just (handleRes lstMaybeBlk) - where - handleRes :: [Maybe NodeBlock] -> [BlockInfo] - handleRes = map fromNodeBlock . catMaybes - -stopServerReq :: MonadLoggerIO m => Handler m (Maybe Value) -stopServerReq = do - $(logInfo) $ format "Received StopServer request" - return Nothing - -{- Helpers -} - -whenOnline :: Monad m => Handler m () -> Handler m () -whenOnline handler = do - mode <- configMode `liftM` asks handlerConfig - when (mode == SPVOnline) handler - -updateNodeFilter - :: (MonadBaseControl IO m, MonadLoggerIO m, MonadThrow m) - => Handler m () -updateNodeFilter = do - $(logInfo) $ format "Sending a new bloom filter" - (bloom, elems, _) <- runDB getBloomFilter - runNode $ atomicallyNodeT $ sendBloomFilter bloom elems - -adjustFCTime :: Timestamp -> Timestamp -adjustFCTime ts = fromInteger $ max 0 $ toInteger ts - 86400 * 7 - -format :: String -> Text -format str = pack $ "[ZeroMQ] " ++ str - diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Settings.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Settings.hs deleted file mode 100644 index 824d0507..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Settings.hs +++ /dev/null @@ -1,202 +0,0 @@ -module Network.Haskoin.Wallet.Settings -( SPVMode(..) -, OutputFormat(..) -, Config(..) 
-) where - -import Control.Monad (mzero) -import Control.Exception (throw) -import Control.Monad.Logger (LogLevel(..)) - -import Data.Default (Default, def) -import Data.FileEmbed (embedFile) -import Data.Yaml (decodeEither') -import Data.Word (Word8, Word32, Word64) -import Data.HashMap.Strict (HashMap, unionWith) -import Data.String.Conversions (cs) -import Data.ByteString (ByteString) -import Data.Text (Text) -import Data.Text.Encoding (encodeUtf8) -import Data.Aeson - ( Value(..), FromJSON, ToJSON - , parseJSON, toJSON, withObject - , (.:) - ) - -import Network.Haskoin.Crypto -import Network.Haskoin.Wallet.Database -import Network.Haskoin.Wallet.Types - -import Data.Restricted (Restricted, Div5) -import System.ZMQ4 (toRestricted) - -data SPVMode = SPVOnline | SPVOffline - deriving (Eq, Show, Read) - -newtype LogLevelJSON = LogLevelJSON LogLevel - deriving (Eq, Show, Read) - -data OutputFormat - = OutputNormal - | OutputJSON - | OutputYAML - -data Config = Config - { configCount :: !Word32 - -- ^ Output size of commands - , configMinConf :: !Word32 - -- ^ Minimum number of confirmations - , configSignTx :: !Bool - -- ^ Sign transactions - , configFee :: !Word64 - -- ^ Fee to pay per 1000 bytes when creating new transactions - , configRcptFee :: !Bool - -- ^ Recipient pays fee (dangerous, no config file setting) - , configAddrType :: !AddressType - -- ^ Return internal instead of external addresses - , configDisplayPubKeys :: !Bool - -- ^ Display public keys instead of addresses - , configOffline :: !Bool - -- ^ Display the balance including offline transactions - , configEntropy :: !Word8 - -- ^ Entropy in bytes to use when generating a mnemonic (between 16 and 32) - , configReversePaging :: !Bool - -- ^ Use reverse paging for displaying addresses and transactions - , configDerivIndex :: !KeyIndex - -- ^ Derivation path when creating account - , configFormat :: !OutputFormat - -- ^ How to format the command-line results - , configConnect :: !String - -- ^ ZeroMQ socket to connect to (location of the server) - , configConnectNotif :: !String - -- ^ ZeroMQ socket to connect for notifications - , configDetach :: !Bool - -- ^ Detach server when launched from command-line - , configFile :: !FilePath - -- ^ Configuration file - , configTestnet :: !Bool - -- ^ Use Testnet3 network - , configDir :: !FilePath - -- ^ Working directory - , configBind :: !String - -- ^ Bind address for the ZeroMQ socket - , configBindNotif :: !String - -- ^ Bind address for ZeroMQ notifications - , configBTCNodes :: !(HashMap Text [BTCNode]) - -- ^ Trusted Bitcoin full nodes to connect to - , configMode :: !SPVMode - -- ^ Operation mode of the SPV node. - , configBloomFP :: !Double - -- ^ False positive rate for the bloom filter. 
- , configDatabase :: !(HashMap Text DatabaseConfType) - -- ^ Database configuration - , configLogFile :: !FilePath - -- ^ Log file - , configPidFile :: !FilePath - -- ^ PID File - , configLogLevel :: !LogLevel - -- ^ Log level - , configVerbose :: !Bool - -- ^ Verbose - , configServerKey :: !(Maybe (Restricted Div5 ByteString)) - -- ^ Server key for authentication and encryption (server config) - , configServerKeyPub :: !(Maybe (Restricted Div5 ByteString)) - -- ^ Server public key for authentication and encryption (client config) - , configClientKey :: !(Maybe (Restricted Div5 ByteString)) - -- ^ Client key for authentication and encryption (client config) - , configClientKeyPub :: !(Maybe (Restricted Div5 ByteString)) - -- ^ Client public key for authentication and encryption - -- (client + server config) - } - -configBS :: ByteString -configBS = $(embedFile "config/config.yml") - -instance ToJSON OutputFormat where - toJSON OutputNormal = String "normal" - toJSON OutputJSON = String "json" - toJSON OutputYAML = String "yaml" - -instance FromJSON OutputFormat where - parseJSON (String "normal") = return OutputNormal - parseJSON (String "json") = return OutputJSON - parseJSON (String "yaml") = return OutputYAML - parseJSON _ = mzero - -instance ToJSON SPVMode where - toJSON SPVOnline = String "online" - toJSON SPVOffline = String "offline" - -instance FromJSON SPVMode where - parseJSON (String "online") = return SPVOnline - parseJSON (String "offline") = return SPVOffline - parseJSON _ = mzero - -instance ToJSON LogLevelJSON where - toJSON (LogLevelJSON LevelDebug) = String "debug" - toJSON (LogLevelJSON LevelInfo) = String "info" - toJSON (LogLevelJSON LevelWarn) = String "warn" - toJSON (LogLevelJSON LevelError) = String "error" - toJSON (LogLevelJSON (LevelOther t)) = String t - -instance FromJSON LogLevelJSON where - parseJSON (String "debug") = return $ LogLevelJSON LevelDebug - parseJSON (String "info") = return $ LogLevelJSON LevelInfo - parseJSON (String "warn") = return $ LogLevelJSON LevelWarn - parseJSON (String "error") = return $ LogLevelJSON LevelError - parseJSON (String x) = return $ LogLevelJSON (LevelOther x) - parseJSON _ = mzero - -instance Default Config where - def = either throw id $ decodeEither' "{}" - -instance FromJSON Config where - parseJSON = withObject "config" $ \o' -> do - let defValue = either throw id $ decodeEither' configBS - (Object o) = mergeValues defValue (Object o') - configDerivIndex = 0 - configFile <- o .: "config-file" - configRcptFee <- o .: "recipient-fee" - configCount <- o .: "output-size" - configMinConf <- o .: "minimum-confirmations" - configSignTx <- o .: "sign-transactions" - configFee <- o .: "transaction-fee" - configAddrType <- o .: "address-type" - configDisplayPubKeys <- o .: "display-pubkeys" - configOffline <- o .: "offline" - configEntropy <- o .: "seed-entropy" - configReversePaging <- o .: "reverse-paging" - configFormat <- o .: "display-format" - configConnect <- o .: "connect-uri" - configConnectNotif <- o .: "connect-uri-notif" - configDetach <- o .: "detach-server" - configTestnet <- o .: "use-testnet" - configDir <- o .: "work-dir" - configBind <- o .: "bind-socket" - configBindNotif <- o .: "bind-socket-notif" - configBTCNodes <- o .: "bitcoin-full-nodes" - configMode <- o .: "server-mode" - configBloomFP <- o .: "bloom-false-positive" - configLogFile <- o .: "log-file" - configPidFile <- o .: "pid-file" - LogLevelJSON configLogLevel <- o .: "log-level" - configVerbose <- o .: "verbose" - configDatabase <- o .: 
"database" - configServerKey <- getKey o "server-key" - configServerKeyPub <- getKey o "server-key-public" - configClientKey <- getKey o "client-key" - configClientKeyPub <- getKey o "client-key-public" - return Config {..} - where - getKey o i = o .: i >>= \kM -> - case kM of - Nothing -> return Nothing - Just k -> - case toRestricted $ encodeUtf8 k of - Just k' -> return $ Just k' - Nothing -> fail $ "Invalid " ++ cs k - - -mergeValues :: Value -> Value -> Value -mergeValues (Object d) (Object c) = Object (unionWith mergeValues d c) -mergeValues _ c = c diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Transaction.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Transaction.hs deleted file mode 100644 index ef14606c..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Transaction.hs +++ /dev/null @@ -1,1374 +0,0 @@ -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE RecordWildCards #-} -module Network.Haskoin.Wallet.Transaction -( --- *Database transactions - txs -, addrTxs -, accTxsFromBlock -, getTx -, getAccountTx -, importTx -, importNetTx -, signAccountTx -, createWalletTx -, signOfflineTx -, getOfflineTxData -, killTxs -, reviveTx -, getPendingTxs -, deleteTx - --- *Database blocks -, importMerkles -, walletBestBlock - --- *Database coins and balances -, spendableCoins -, accountBalance -, addressBalances - --- *Rescan -, resetRescan - --- *Helpers -, isCoinbaseTx -, InCoinData(..) -) where - -import Control.Arrow (second) -import Control.Concurrent.STM (atomically) -import Control.Concurrent.STM.TBMChan (TBMChan, writeTBMChan) -import Control.Exception (throw, throwIO) -import Control.Monad (forM, forM_, unless, when) -import Control.Monad.Base (MonadBase) -import Control.Monad.Catch (MonadThrow, throwM) -import Control.Monad.Trans (MonadIO, liftIO) -import Control.Monad.Trans.Resource (MonadResource) -import qualified Data.ByteString as BS -import Data.Either (fromRight, rights) -import Data.List (find, nub, nubBy, (\\)) -import qualified Data.Map.Strict as M (Map, fromListWith, map, - toList, unionWith) -import Data.Maybe (fromMaybe, isJust, isNothing, - listToMaybe, mapMaybe) -import Data.Serialize (decode) -import Data.String.Conversions (cs) -import Data.Time (UTCTime, getCurrentTime) -import Data.Word (Word32, Word64) -import Database.Esqueleto (Entity (..), InnerJoin (..), - LeftOuterJoin (..), OrderBy, - SqlExpr, SqlPersistT, - SqlQuery, Value (..), asc, - case_, coalesceDefault, count, - countDistinct, countRows, - delete, desc, distinct, else_, - from, get, getBy, groupBy, - in_, just, limit, not_, on, - orderBy, replace, select, set, - sub_select, sum_, then_, - unValue, update, val, valList, - when_, where_, (!=.), (&&.), - (-.), (<.), (<=.), (=.), - (==.), (>=.), (?.), (^.), - (||.)) -import qualified Database.Esqueleto as E (isNothing) -import qualified Database.Persist as P (Filter, deleteWhere, - insertBy, selectFirst) -import Network.Haskoin.Block -import Network.Haskoin.Constants -import Network.Haskoin.Crypto -import Network.Haskoin.Node.HeaderTree -import Network.Haskoin.Node.STM -import Network.Haskoin.Script -import Network.Haskoin.Transaction -import Network.Haskoin.Util -import Network.Haskoin.Wallet.Accounts -import Network.Haskoin.Wallet.Model -import Network.Haskoin.Wallet.Types - --- Input coin type with transaction and address information -data InCoinData = InCoinData - { inCoinDataCoin :: !(Entity WalletCoin) - , inCoinDataTx :: !WalletTx - , inCoinDataAddr :: !WalletAddr - } - -instance Coin InCoinData 
where - coinValue (InCoinData (Entity _ c) _ _) = walletCoinValue c - --- Output coin type with address information -data OutCoinData = OutCoinData - { outCoinDataAddr :: !(Entity WalletAddr) - , outCoinDataPos :: !KeyIndex - , outCoinDataValue :: !Word64 - , outCoinDataScript :: !ScriptOutput - } - -{- List transactions -} - --- | Get transactions. -txs :: MonadIO m - => Maybe TxConfidence - -> AccountId -- ^ Account ID - -> ListRequest -- ^ List request - -> SqlPersistT m ([WalletTx], Word32) - -- ^ List result -txs conf ai ListRequest{..} = do - [cnt] <- fmap (map unValue) $ select $ from $ \t -> do - cond t - return countRows - when (listOffset > 0 && listOffset >= cnt) $ throw $ WalletException - "Offset beyond end of data set" - res <- fmap (map entityVal) $ select $ from $ \t -> do - cond t - orderBy [ order (t ^. WalletTxId) ] - limitOffset listLimit listOffset - return t - return (res, cnt) - where - account t = t ^. WalletTxAccount ==. val ai - cond t = where_ $ case conf of - Just n -> account t &&. t ^. WalletTxConfidence ==. val n - Nothing -> account t - order = if listReverse then asc else desc - -{- List transactions for an account and address -} - -addrTxs :: MonadIO m - => Entity Account -- ^ Account entity - -> Entity WalletAddr -- ^ Address entity - -> ListRequest -- ^ List request - -> SqlPersistT m ([WalletTx], Word32) -addrTxs (Entity ai _) (Entity addrI WalletAddr{..}) ListRequest{..} = do - let joinSpentCoin c2 s = - c2 ?. WalletCoinAccount ==. s ?. SpentCoinAccount - &&. c2 ?. WalletCoinHash ==. s ?. SpentCoinHash - &&. c2 ?. WalletCoinPos ==. s ?. SpentCoinPos - &&. c2 ?. WalletCoinAddr ==. just (val addrI) - joinSpent s t = - s ?. SpentCoinSpendingTx ==. just (t ^. WalletTxId) - joinCoin c t = - c ?. WalletCoinTx ==. just (t ^. WalletTxId) - &&. c ?. WalletCoinAddr ==. just (val addrI) - joinAll t c c2 s = do - on $ joinSpentCoin c2 s - on $ joinSpent s t - on $ joinCoin c t - tables f = from $ \(t `LeftOuterJoin` c `LeftOuterJoin` - s `LeftOuterJoin` c2) -> f t c s c2 - query t c s c2 = do - joinAll t c c2 s - where_ ( t ^. WalletTxAccount ==. val ai - &&. ( not_ (E.isNothing (c ?. WalletCoinId)) - ||. not_ (E.isNothing (c2 ?. WalletCoinId)) - ) - ) - let order = if listReverse then asc else desc - orderBy [ order (t ^. WalletTxId) ] - - - cntRes <- select $ tables $ \t c s c2 -> do - query t c s c2 - return $ countDistinct $ t ^. WalletTxId - - let cnt = maybe 0 unValue $ listToMaybe cntRes - - when (listOffset > 0 && listOffset >= cnt) $ throw $ WalletException - "Offset beyond end of data set" - - res <- select $ distinct $ tables $ \t c s c2 -> do - query t c s c2 - limitOffset listLimit listOffset - return t - - return (map (updBals . entityVal) res, cnt) - - where - agg = sum . mapMaybe addressInfoValue . - filter ((== walletAddrAddress) . addressInfoAddress) - updBals t = - let - input = agg $ walletTxInputs t - output = agg $ walletTxOutputs t - change = agg $ walletTxChange t - in - t { walletTxInValue = output + change - , walletTxOutValue = input - } - -accTxsFromBlock :: (MonadIO m, MonadThrow m) - => AccountId - -> BlockHeight - -> Word32 -- ^ Block count (0 for all) - -> SqlPersistT m [WalletTx] -accTxsFromBlock ai bh n = - fmap (map entityVal) $ select $ from $ \t -> do - query t - orderBy [ asc (t ^. WalletTxConfirmedHeight), asc (t ^. WalletTxId) ] - return t - where - query t - | n == 0 = where_ $ - t ^. WalletTxAccount ==. val ai &&. - t ^. WalletTxConfirmedHeight >=. just (val bh) - | otherwise = where_ $ - t ^. WalletTxAccount ==. 
val ai &&. - t ^. WalletTxConfirmedHeight >=. just (val bh) &&. - t ^. WalletTxConfirmedHeight <. just (val $ bh + n) - --- Helper function to get a transaction from the wallet database. The function --- will look across all accounts and return the first available transaction. If --- the transaction does not exist, this function will throw a wallet exception. -getTx :: MonadIO m => TxHash -> SqlPersistT m (Maybe Tx) -getTx txid = - fmap (listToMaybe . map unValue) $ select $ from $ \t -> do - where_ $ t ^. WalletTxHash ==. val txid - limit 1 - return $ t ^. WalletTxTx - -getAccountTx :: MonadIO m - => AccountId -> TxHash -> SqlPersistT m WalletTx -getAccountTx ai txid = do - res <- select $ from $ \t -> do - where_ ( t ^. WalletTxAccount ==. val ai - &&. t ^. WalletTxHash ==. val txid - ) - return t - case res of - (Entity _ tx:_) -> return tx - _ -> liftIO . throwIO $ WalletException $ unwords - [ "Transaction does not exist:", cs $ txHashToHex txid ] - --- Helper function to get all the pending transactions from the database. It is --- used to re-broadcast pending transactions in the wallet that have not been --- included into blocks yet. -getPendingTxs :: MonadIO m => Int -> SqlPersistT m [TxHash] -getPendingTxs i = - fmap (map unValue) $ select $ from $ \t -> do - where_ $ t ^. WalletTxConfidence ==. val TxPending - when (i > 0) $ limit $ fromIntegral i - return $ t ^. WalletTxHash - -{- Transaction Import -} - --- | Import a transaction into the wallet from an unknown source. If the --- transaction is standard, valid, all inputs are known and all inputs can be --- spent, then the transaction will be imported as a network transaction. --- Otherwise, the transaction will be imported into the local account as an --- offline transaction. -importTx :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Tx -- ^ Transaction to import - -> Maybe (TBMChan Notif) - -> AccountId -- ^ Account ID - -> SqlPersistT m ([WalletTx], [WalletAddr]) - -- ^ New transactions and addresses created -importTx tx notifChanM ai = - importTx' tx notifChanM ai =<< getInCoins tx (Just ai) - -importTx' :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Tx -- ^ Transaction to import - -> Maybe (TBMChan Notif) - -> AccountId -- ^ Account ID - -> [InCoinData] -- ^ Input coins - -> SqlPersistT m ([WalletTx], [WalletAddr]) - -- ^ Transaction hash (after possible merges) -importTx' origTx notifChanM ai origInCoins = do - -- Merge the transaction with any previously existing transactions - mergeResM <- mergeNoSigHashTxs ai origTx origInCoins - let tx = fromMaybe origTx mergeResM - origTxid = txHash origTx - txid = txHash tx - - -- If the transaction was merged into a new transaction, - -- update the old hashes to the new ones. This allows us to - -- keep the spending information of our coins. It is thus possible - -- to spend partially signed multisignature transactions (as offline - -- transactions) even before all signatures have arrived. - inCoins <- if origTxid == txid then return origInCoins else do - -- Update transactions - update $ \t -> do - set t [ WalletTxHash =. val txid - , WalletTxTx =. val tx - ] - where_ ( t ^. WalletTxAccount ==. val ai - &&. t ^. WalletTxHash ==. val origTxid - ) - -- Update coins - update $ \t -> do - set t [ WalletCoinHash =. val txid ] - where_ ( t ^. WalletCoinAccount ==. val ai - &&. t ^. WalletCoinHash ==. 
val origTxid - ) - let f (InCoinData c t x) = if walletTxHash t == origTxid - then InCoinData c - t{ walletTxHash = txid, walletTxTx = tx } x - else InCoinData c t x - return $ map f origInCoins - - spendingTxs <- getSpendingTxs tx (Just ai) - - let validTx = verifyStdTx tx $ map toVerDat inCoins - validIn = length inCoins == length (txIn tx) - && canSpendCoins inCoins spendingTxs False - if validIn && validTx - then importNetTx tx notifChanM - else importOfflineTx tx notifChanM ai inCoins spendingTxs - where - toVerDat (InCoinData (Entity _ c) t _) = - (walletCoinScript c, OutPoint (walletTxHash t) (walletCoinPos c)) - --- Offline transactions are usually multisignature transactions requiring --- additional signatures. This function will merge the signatures of --- the same offline transactions together into one single transaction. -mergeNoSigHashTxs :: MonadIO m - => AccountId - -> Tx - -> [InCoinData] - -> SqlPersistT m (Maybe Tx) -mergeNoSigHashTxs ai tx inCoins = do - prevM <- getBy $ UniqueAccNoSig ai $ nosigTxHash tx - return $ case prevM of - Just (Entity _ prev) -> case walletTxConfidence prev of - TxOffline -> eitherToMaybe $ - mergeTxs [tx, walletTxTx prev] outPoints - _ -> Nothing - -- Nothing to merge. Return the original transaction. - _ -> Nothing - where - buildOutpoint c t = OutPoint (walletTxHash t) (walletCoinPos c) - f (InCoinData (Entity _ c) t _) = (walletCoinScript c, buildOutpoint c t) - outPoints = map f inCoins - --- | Import an offline transaction into a specific account. Offline transactions --- are imported either manually or from the wallet when building a partially --- signed multisignature transaction. Offline transactions are only imported --- into one specific account. They will not affect the input or output coins --- of other accounts, including read-only accounts that may watch the same --- addresses as this account. --- --- We allow transactions to be imported manually by this function (unlike --- `importNetTx` which imports only transactions coming from the network). This --- means that it is possible to import completely crafted and invalid --- transactions into the wallet. It is thus important to limit the scope of --- those transactions to only the specific account in which it was imported. --- --- This function will not broadcast these transactions to the network as we --- have no idea if they are valid or not. Transactions are broadcast from the --- transaction creation function and only if the transaction is complete. -importOfflineTx - :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Tx - -> Maybe (TBMChan Notif) - -> AccountId - -> [InCoinData] - -> [Entity WalletTx] - -> SqlPersistT m ([WalletTx], [WalletAddr]) -importOfflineTx tx notifChanM ai inCoins spendingTxs = do - -- Get all the new coins to be created by this transaction - outCoins <- getNewCoins tx $ Just ai - -- Only continue if the transaction is relevant to the account - when (null inCoins && null outCoins) err - -- Find the details of an existing transaction if it exists. - prevM <- fmap (fmap entityVal) $ getBy $ UniqueAccTx ai txid - -- Check if we can import the transaction - unless (canImport $ walletTxConfidence <$> prevM) err - -- Kill transactions that are spending our coins - killTxIds notifChanM $ map entityKey spendingTxs - -- Create all the transaction records for this account. 
- -- This will spend the input coins and create the output coins - txsRes <- buildAccTxs notifChanM tx TxOffline inCoins outCoins - -- use the addresses (refill the gap addresses) - newAddrs <- forM (nubBy sameKey $ map outCoinDataAddr outCoins) $ - useAddress . entityVal - return (txsRes, concat newAddrs) - where - txid = txHash tx - canImport prevConfM = - -- We can only re-import offline txs through this function. - (isNothing prevConfM || prevConfM == Just TxOffline) && - -- Check that all coins can be spent. We allow offline - -- coins to be spent by this function unlike importNetTx. - canSpendCoins inCoins spendingTxs True - sameKey e1 e2 = entityKey e1 == entityKey e2 - err = liftIO . throwIO $ WalletException - "Could not import offline transaction" - --- | Import a transaction from the network into the wallet. This function --- assumes transactions are imported in-order (parents first). It also assumes --- that the confirmations always arrive after the transaction imports. This --- function is idempotent. --- --- When re-importing an existing transaction, this function will recompute --- the inputs, outputs and transaction details for each account. A non-dead --- transaction could be set to dead due to new inputs being double spent. --- However, we do not allow dead transactions to be revived by reimporting them. --- Transactions can only be revived if they make it into the main chain. --- --- This function returns the network confidence of the imported transaction. -importNetTx - :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Tx -- Network transaction to import - -> Maybe (TBMChan Notif) - -> SqlPersistT m ([WalletTx], [WalletAddr]) - -- ^ Returns the new transactions and addresses created -importNetTx tx notifChanM = do - -- Find all the coins spent by this transaction - inCoins <- getInCoins tx Nothing - -- Get all the new coins to be created by this transaction - outCoins <- getNewCoins tx Nothing - -- Only continue if the transaction is relevant to the wallet - if null inCoins && null outCoins then return ([],[]) else do - -- Update incomplete offline transactions when the completed - -- transaction comes in from the network. - updateNosigHash tx (nosigTxHash tx) txid - -- Get the transaction spending our coins - spendingTxs <- getSpendingTxs tx Nothing - -- Compute the confidence - let confidence | canSpendCoins inCoins spendingTxs False = TxPending - | otherwise = TxDead - -- Kill transactions that are spending our coins if we are not dead - when (confidence /= TxDead) $ - killTxIds notifChanM $ map entityKey spendingTxs - -- Create all the transaction records for this account. - -- This will spend the input coins and create the output coins - txRes <- buildAccTxs notifChanM tx confidence inCoins outCoins - -- Use up the addresses of our new coins (replenish gap addresses) - newAddrs <- forM (nubBy sameKey $ map outCoinDataAddr outCoins) $ - useAddress . entityVal - forM_ notifChanM $ \notifChan -> forM_ txRes $ - \tx' -> do - let ai = walletTxAccount tx' - Account{..} <- - fromMaybe (error "Velociraptors ate you") <$> get ai - liftIO $ atomically $ writeTBMChan notifChan $ - NotifTx $ toJsonTx accountName Nothing tx' - return (txRes, concat newAddrs) - where - sameKey e1 e2 = entityKey e1 == entityKey e2 - txid = txHash tx - -updateNosigHash :: MonadIO m => Tx -> TxHash -> TxHash -> SqlPersistT m () -updateNosigHash tx nosig txid = do - res <- select $ from $ \t -> do - where_ ( t ^. WalletTxNosigHash ==. val nosig - &&. t ^. WalletTxHash !=. 
val txid - ) - return $ t ^. WalletTxHash - let toUpdate = map unValue res - unless (null toUpdate) $ do - splitUpdate toUpdate $ \hs t -> do - set t [ WalletTxHash =. val txid - , WalletTxTx =. val tx - ] - where_ $ t ^. WalletTxHash `in_` valList hs - splitUpdate toUpdate $ \hs c -> do - set c [ WalletCoinHash =. val txid ] - where_ $ c ^. WalletCoinHash `in_` valList hs - --- Check if the given coins can be spent. -canSpendCoins :: [InCoinData] - -> [Entity WalletTx] - -> Bool -- True for offline transactions - -> Bool -canSpendCoins inCoins spendingTxs offline = - all validCoin inCoins && - all validSpend spendingTxs - where - -- We can only spend pending and building coins - validCoin (InCoinData _ t _) - | offline = walletTxConfidence t /= TxDead - | otherwise = walletTxConfidence t `elem` [TxPending, TxBuilding] - -- All transactions spending the same coins as us should be offline - validSpend = (== TxOffline) . walletTxConfidence . entityVal - --- Get the coins in the wallet related to the inputs of a transaction. You --- can optionally provide an account to limit the returned coins to that --- account only. -getInCoins :: MonadIO m - => Tx - -> Maybe AccountId - -> SqlPersistT m [InCoinData] -getInCoins tx aiM = do - res <- splitSelect ops $ \os -> from $ \(c `InnerJoin` t `InnerJoin` x) -> do - on $ x ^. WalletAddrId ==. c ^. WalletCoinAddr - on $ t ^. WalletTxId ==. c ^. WalletCoinTx - where_ $ case aiM of - Just ai -> - c ^. WalletCoinAccount ==. val ai &&. limitOutPoints c os - _ -> limitOutPoints c os - return (c, t, x) - return $ map (\(c, t, x) -> InCoinData c (entityVal t) (entityVal x)) res - where - ops = map prevOutput $ txIn tx - limitOutPoints c os = join2 $ map (f c) os - f c (OutPoint h i) = - c ^. WalletCoinHash ==. val h &&. - c ^. WalletCoinPos ==. val i - --- Find all the transactions that are spending the same coins as the given --- transaction. You can optionally provide an account to limit the returned --- transactions to that account only. -getSpendingTxs :: MonadIO m - => Tx - -> Maybe AccountId - -> SqlPersistT m [Entity WalletTx] -getSpendingTxs tx aiM - | null txInputs = return [] - | otherwise = - splitSelect txInputs $ \ins -> - from $ \(s `InnerJoin` t) -> do - on $ s ^. SpentCoinSpendingTx ==. t ^. WalletTxId - -- Filter out the given transaction - let cond = t ^. WalletTxHash !=. val txid &&. limitSpent s ins - where_ $ - case aiM of - Just ai -> cond &&. s ^. SpentCoinAccount ==. val ai - _ -> cond - return t - where - txid = txHash tx - txInputs = filter nonZero $ map prevOutput $ txIn tx - limitSpent s ins = join2 $ map (f s) ins - f s (OutPoint h i) = - s ^. SpentCoinHash ==. val h &&. s ^. SpentCoinPos ==. val i - nonZero (OutPoint h _) - | h == - TxHash - (fromRight - (error "Could not decode zero hash") - (decode (BS.replicate 32 0x00))) = False - | otherwise = True - --- Returns all the new coins that need to be created from a transaction. --- Also returns the addresses associted with those coins. -getNewCoins :: MonadIO m - => Tx - -> Maybe AccountId - -> SqlPersistT m [OutCoinData] -getNewCoins tx aiM = do - -- Find all the addresses which are in the transaction outputs - addrs <- splitSelect uniqueAddrs $ \as -> from $ \x -> do - let cond = x ^. WalletAddrAddress `in_` valList as - where_ $ case aiM of - Just ai -> cond &&. x ^. WalletAddrAccount ==. 
val ai - _ -> cond - return x - return $ concatMap toCoins addrs - where - uniqueAddrs = nub $ map (\(addr,_,_,_) -> addr) outList - outList = rights $ map toDat txOutputs - txOutputs = zip (txOut tx) [0..] - toDat (out, pos) = getDataFromOutput out >>= \(addr, so) -> - return (addr, out, pos, so) - toCoins addrEnt@(Entity _ addr) = - let f (a,_,_,_) = a == walletAddrAddress addr - in map (toCoin addrEnt) $ filter f outList - toCoin addrEnt (_, out, pos, so) = OutCoinData - { outCoinDataAddr = addrEnt - , outCoinDataPos = pos - , outCoinDataValue = outValue out - , outCoinDataScript = so - } - --- Decode an output and extract an output script and a recipient address -getDataFromOutput :: TxOut -> Either String (Address, ScriptOutput) -getDataFromOutput out = do - so <- decodeOutputBS $ scriptOutput out - addr <- outputAddress so - return (addr, so) - -isCoinbaseTx :: Tx -> Bool -isCoinbaseTx tx = - length (txIn tx) == 1 && outPointHash (prevOutput $ head (txIn tx)) == - "0000000000000000000000000000000000000000000000000000000000000000" - --- | Spend the given input coins. We also create dummy coins for the inputs --- in a transaction that do not belong to us. This is to be able to detect --- double spends when reorgs occur. -spendInputs :: MonadIO m - => AccountId - -> WalletTxId - -> Tx - -> SqlPersistT m () -spendInputs ai ti tx = do - now <- liftIO getCurrentTime - -- Spend the coins by inserting values in SpentCoin - splitInsertMany_ $ map (buildSpentCoin now) $ filter nonZero txInputs - where - txInputs = map prevOutput $ txIn tx - buildSpentCoin now (OutPoint h p) = - SpentCoin - { spentCoinAccount = ai - , spentCoinHash = h - , spentCoinPos = p - , spentCoinSpendingTx = ti - , spentCoinCreated = now - } - nonZero (OutPoint h _) - | h == - TxHash - (fromRight - (error "Could not decode zero hash") - (decode (BS.replicate 32 0x00))) = False - | otherwise = True - --- Build account transaction for the given input and output coins -buildAccTxs :: MonadIO m - => Maybe (TBMChan Notif) - -> Tx - -> TxConfidence - -> [InCoinData] - -> [OutCoinData] - -> SqlPersistT m [WalletTx] -buildAccTxs notifChanM tx confidence inCoins outCoins = do - now <- liftIO getCurrentTime - -- Group the coins by account - let grouped = groupCoinsByAccount inCoins outCoins - forM (M.toList grouped) $ \(ai, (is, os)) -> do - let atx = buildAccTx tx confidence ai is os now - -- Insert the new transaction. If it already exists, update the - -- information with the newly computed values. Also make sure that the - -- confidence is set to the new value (it could have changed to TxDead). - Entity ti newAtx <- P.insertBy atx >>= \resE -> case resE of - Left (Entity ti prev) -> do - let prevConf = walletTxConfidence prev - newConf | confidence == TxDead = TxDead - | prevConf == TxBuilding = TxBuilding - | otherwise = confidence - -- If the transaction already exists, preserve confirmation data - let newAtx = atx - { walletTxConfidence = newConf - , walletTxConfirmedBy = walletTxConfirmedBy prev - , walletTxConfirmedHeight = walletTxConfirmedHeight prev - , walletTxConfirmedDate = walletTxConfirmedDate prev - } - replace ti newAtx - -- Spend inputs only if the previous transaction was dead - when (newConf /= TxDead && prevConf == TxDead) $ - spendInputs ai ti tx - -- If the transaction changed from non-dead to dead, kill it. - -- This will remove spent coins and child transactions. 
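-- Both nonZero helpers above (in getSpendingTxs and spendInputs) rebuild the
-- all-zero previous-output hash inline. A sketch of hoisting that sentinel
-- into a single definition, reusing the same cereal decode and Serialize
-- instance the surrounding code already depends on:

import qualified Data.ByteString as BS
import           Data.Either     (fromRight)
import           Data.Serialize  (decode)
import           Network.Haskoin.Transaction

-- The null outpoint hash that marks a coinbase input (32 zero bytes).
zeroTxHash :: TxHash
zeroTxHash =
    fromRight (error "Could not decode zero hash") (decode (BS.replicate 32 0x00))

-- nonZero could then be written once as:
-- nonZero (OutPoint h _) = h /= zeroTxHash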
- when (prevConf /= TxDead && newConf == TxDead) $ - killTxIds notifChanM [ti] - return (Entity ti newAtx) - Right ti -> do - when (confidence /= TxDead) $ spendInputs ai ti tx - return (Entity ti atx) - - -- Insert the output coins with updated accTx key - let newOs = map (toCoin ai ti now) os - forM_ newOs $ \c -> P.insertBy c >>= \resE -> case resE of - Left (Entity ci _) -> replace ci c - _ -> return () - - -- Return the new transaction record - return newAtx - where - toCoin ai accTxId now (OutCoinData addrEnt pos vl so) = WalletCoin - { walletCoinAccount = ai - , walletCoinHash = txHash tx - , walletCoinPos = pos - , walletCoinTx = accTxId - , walletCoinValue = vl - , walletCoinScript = so - , walletCoinAddr = entityKey addrEnt - , walletCoinCreated = now - } - --- | Build an account transaction given the input and output coins relevant to --- this specific account. An account transaction contains the details of how a --- transaction affects one particular account (value sent to and from the --- account). The first value is Maybe an existing transaction in the database --- which is used to get the existing confirmation values. -buildAccTx :: Tx - -> TxConfidence - -> AccountId - -> [InCoinData] - -> [OutCoinData] - -> UTCTime - -> WalletTx -buildAccTx tx confidence ai inCoins outCoins now = WalletTx - { walletTxAccount = ai - , walletTxHash = txHash tx - -- This is a hash of the transaction excluding signatures. This allows us - -- to track the evolution of offline transactions as we add more signatures - -- to them. - , walletTxNosigHash = nosigTxHash tx - , walletTxType = txType - , walletTxInValue = inVal - , walletTxOutValue = outVal - , walletTxInputs = - let f h i (InCoinData (Entity _ c) t _) = - walletTxHash t == h && walletCoinPos c == i - toInfo (a, OutPoint h i) = case find (f h i) inCoins of - Just (InCoinData (Entity _ c) _ _) -> - AddressInfo a (Just $ walletCoinValue c) True - _ -> AddressInfo a Nothing False - in map toInfo allInAddrs - , walletTxOutputs = - let toInfo (a,i,v) = AddressInfo a (Just v) $ ours i - ours i = isJust $ find ((== i) . outCoinDataPos) outCoins - in map toInfo allOutAddrs \\ changeAddrs - , walletTxChange = changeAddrs - , walletTxTx = tx - , walletTxIsCoinbase = isCoinbaseTx tx - , walletTxConfidence = confidence - -- Reuse the confirmation information of the existing transaction if - -- we have it. - , walletTxConfirmedBy = Nothing - , walletTxConfirmedHeight = Nothing - , walletTxConfirmedDate = Nothing - , walletTxCreated = now - } - where - -- The value going into the account is the sum of the output coins - inVal = sum $ map outCoinDataValue outCoins - -- The value going out of the account is the sum on the input coins - outVal = sum $ map coinValue inCoins - allMyCoins = length inCoins == length (txIn tx) && - length outCoins == length (txOut tx) - txType - -- If all the coins belong to the same account, it is a self - -- transaction (even if a fee was payed). - | allMyCoins = TxSelf - -- This case can happen in complex transactions where the total - -- input/output sum for a given account is 0. In this case, we count - -- that transaction as a TxSelf. This should not happen with simple - -- transactions. 
- | inVal == outVal = TxSelf - | inVal > outVal = TxIncoming - | otherwise = TxOutgoing - -- List of all the decodable input addresses in the transaction - allInAddrs = - let f inp = do - input <- decodeInputBS (scriptInput inp) - addr <- inputAddress input - return (addr, prevOutput inp) - in rights $ map f $ txIn tx - -- List of all the decodable output addresses in the transaction - allOutAddrs = - let f op i = do - addr <- outputAddress =<< decodeOutputBS (scriptOutput op) - return (addr, i, outValue op) - in rights $ zipWith f (txOut tx) [0..] - changeAddrs - | txType == TxIncoming = [] - | otherwise = - let isInternal = (== AddressInternal) . walletAddrType - . entityVal . outCoinDataAddr - f = walletAddrAddress . entityVal . outCoinDataAddr - toInfo c = AddressInfo (f c) (Just $ outCoinDataValue c) True - in map toInfo $ filter isInternal outCoins - --- Group all the input and outputs coins from the same account together. -groupCoinsByAccount - :: [InCoinData] - -> [OutCoinData] - -> M.Map AccountId ([InCoinData], [OutCoinData]) -groupCoinsByAccount inCoins outCoins = - M.unionWith merge inMap outMap - where - -- Build a map from accounts -> (inCoins, outCoins) - f coin@(InCoinData _ t _) = (walletTxAccount t, [coin]) - g coin = (walletAddrAccount $ entityVal $ outCoinDataAddr coin, [coin]) - merge (is, _) (_, os) = (is, os) - inMap = M.map (\is -> (is, [])) $ M.fromListWith (++) $ map f inCoins - outMap = M.map (\os -> ([], os)) $ M.fromListWith (++) $ map g outCoins - -deleteTx :: (MonadIO m, MonadThrow m) => TxHash -> SqlPersistT m () -deleteTx txid = do - ts <- select $ from $ \t -> do - where_ $ t ^. WalletTxHash ==. val txid - return t - case ts of - [] -> throwM $ WalletException $ unwords - [ "Cannot delete inexistent transaction" - , cs (txHashToHex txid) - ] - Entity{entityVal = WalletTx{walletTxConfidence = TxBuilding}} : _ -> - throwM $ WalletException $ unwords - [ "Cannot delete confirmed transaction" - , cs (txHashToHex txid) - ] - _ -> return () - children <- fmap (map unValue) $ select $ from $ - \(t `InnerJoin` c `InnerJoin` s `InnerJoin` t2) -> do - on $ s ^. SpentCoinSpendingTx ==. t2 ^. WalletTxId - on ( c ^. WalletCoinAccount ==. t ^. WalletTxAccount - &&. c ^. WalletCoinHash ==. s ^. SpentCoinHash - &&. c ^. WalletCoinPos ==. s ^. SpentCoinPos - ) - on $ c ^. WalletCoinTx ==. t ^. WalletTxId - where_ $ t ^. WalletTxHash ==. val txid - return $ t2 ^. WalletTxHash - forM_ children deleteTx - forM_ ts $ \Entity{entityKey = ti} -> - delete $ from $ \s -> where_ $ s ^. SpentCoinSpendingTx ==. val ti - delete $ from $ \s -> where_ $ s ^. SpentCoinHash ==. val txid - forM_ ts $ \Entity{entityKey = ti} -> do - delete $ from $ \c -> where_ $ c ^. WalletCoinTx ==. val ti - delete $ from $ \t -> where_ $ t ^. WalletTxId ==. val ti - --- Kill transactions and their children by ids. -killTxIds :: MonadIO m - => Maybe (TBMChan Notif) - -> [WalletTxId] - -> SqlPersistT m () -killTxIds notifChanM txIds = do - -- Find all the transactions spending the coins of these transactions - -- (Find all the child transactions) - children <- splitSelect txIds $ \ts -> from $ \(t `InnerJoin` s) -> do - on ( s ^. SpentCoinAccount ==. t ^. WalletTxAccount - &&. s ^. SpentCoinHash ==. t ^. WalletTxHash - ) - where_ $ t ^. WalletTxId `in_` valList ts - return $ s ^. SpentCoinSpendingTx - - -- Kill these transactions - splitUpdate txIds $ \ts t -> do - set t [ WalletTxConfidence =. val TxDead ] - where_ $ t ^. 
WalletTxId `in_` valList ts - - case notifChanM of - Nothing -> return () - Just notifChan -> do - ts' <- fmap (map entityVal) $ - splitSelect txIds $ \ts -> from $ \t -> do - where_ $ t ^. WalletTxId `in_` valList ts - return t - forM_ ts' $ \tx -> do - let ai = walletTxAccount tx - Account{..} <- - fromMaybe (error "More velociraptors coming") <$> get ai - liftIO $ atomically $ writeTBMChan notifChan $ - NotifTx $ toJsonTx accountName Nothing tx - - - -- This transaction doesn't spend any coins - splitDelete txIds $ \ts -> from $ \s -> - where_ $ s ^. SpentCoinSpendingTx `in_` valList ts - - -- Recursively kill all the child transactions. - -- (Recurse at the end in case there are closed loops) - unless (null children) $ killTxIds notifChanM $ nub $ map unValue children - --- Kill transactions and their child transactions by hashes. -killTxs :: MonadIO m - => Maybe (TBMChan Notif) - -> [TxHash] - -> SqlPersistT m () -killTxs notifChanM txHashes = do - res <- splitSelect txHashes $ \hs -> from $ \t -> do - where_ $ t ^. WalletTxHash `in_` valList hs - return $ t ^. WalletTxId - killTxIds notifChanM $ map unValue res - -{- Confirmations -} - -importMerkles :: MonadIO m - => BlockChainAction - -> [MerkleTxs] - -> Maybe (TBMChan Notif) - -> SqlPersistT m () -importMerkles action expTxsLs notifChanM = - when (isBestChain action || isChainReorg action) $ do - case action of - ChainReorg _ os _ -> - -- Unconfirm transactions from the old chain. - let hs = map (Just . nodeHash) os - in splitUpdate hs $ \h t -> do - set t [ WalletTxConfidence =. val TxPending - , WalletTxConfirmedBy =. val Nothing - , WalletTxConfirmedHeight =. val Nothing - , WalletTxConfirmedDate =. val Nothing - ] - where_ $ t ^. WalletTxConfirmedBy `in_` valList h - _ -> return () - - -- Find all the dead transactions which need to be revived - deadTxs <- splitSelect (concat expTxsLs) $ \ts -> from $ \t -> do - where_ ( t ^. WalletTxHash `in_` valList ts - &&. t ^. WalletTxConfidence ==. val TxDead - ) - return $ t ^. WalletTxTx - - -- Revive dead transactions (in no particular order) - forM_ deadTxs $ reviveTx notifChanM . unValue - - -- Confirm the transactions - forM_ (zip (actionNodes action) expTxsLs) $ \(node, hs) -> do - let hash = nodeHash node - height = nodeBlockHeight node - - splitUpdate hs $ \h t -> do - set t [ WalletTxConfidence =. val TxBuilding - , WalletTxConfirmedBy =. val (Just hash) - , WalletTxConfirmedHeight =. - val (Just height) - , WalletTxConfirmedDate =. - val (Just $ nodeTimestamp node) - ] - where_ $ t ^. WalletTxHash `in_` valList h - - ts <- fmap (map entityVal) $ splitSelect hs $ \h -> from $ \t -> do - where_ $ t ^. WalletTxHash `in_` valList h - return t - - -- Update the best height in the wallet (used to compute the number - -- of confirmations of transactions) - setBestBlock hash height - - -- Send notification for block - forM_ notifChanM $ \notifChan -> do - liftIO $ atomically $ writeTBMChan notifChan $ - NotifBlock JsonBlock - { jsonBlockHash = hash - , jsonBlockHeight = height - , jsonBlockPrev = nodePrev node - } - sendTxs notifChan ts hash height - where - sendTxs notifChan ts hash height = forM_ ts $ \tx -> do - let ai = walletTxAccount tx - Account{..} <- fromMaybe (error "Dino crisis") <$> get ai - liftIO $ atomically $ writeTBMChan notifChan $ - NotifTx $ toJsonTx accountName (Just (hash, height)) tx - --- Helper function to set the best block and best block height in the DB. 
-setBestBlock :: MonadIO m => BlockHash -> Word32 -> SqlPersistT m () -setBestBlock bid i = update $ \t -> set t [ WalletStateBlock =. val bid - , WalletStateHeight =. val i - ] - --- Helper function to get the best block and best block height from the DB -walletBestBlock :: MonadIO m => SqlPersistT m (BlockHash, Word32) -walletBestBlock = do - cfgM <- fmap entityVal <$> P.selectFirst [] [] - return $ case cfgM of - Just WalletState{..} -> (walletStateBlock, walletStateHeight) - Nothing -> throw $ WalletException $ unwords - [ "Could not get the best block." - , "Wallet database is probably not initialized" - ] - --- Revive a dead transaction. All transactions that are in conflict with this --- one will be killed. -reviveTx :: MonadIO m - => Maybe (TBMChan Notif) - -> Tx - -> SqlPersistT m () -reviveTx notifChanM tx = do - -- Kill all transactions spending our coins - spendingTxs <- getSpendingTxs tx Nothing - killTxIds notifChanM $ map entityKey spendingTxs - - -- Find all the WalletTxId that have to be revived - ids <- select $ from $ \t -> do - where_ $ t ^. WalletTxHash ==. val (txHash tx) - &&. t ^. WalletTxConfidence ==. val TxDead - return (t ^. WalletTxAccount, t ^. WalletTxId) - - -- Spend the inputs for all our transactions - forM_ ids $ \(Value ai, Value ti) -> spendInputs ai ti tx - - let ids' = map (unValue . snd) ids - -- Update the transactions - splitUpdate ids' $ \is t -> do - set t [ WalletTxConfidence =. val TxPending - , WalletTxConfirmedBy =. val Nothing - , WalletTxConfirmedHeight =. val Nothing - , WalletTxConfirmedDate =. val Nothing - ] - where_ $ t ^. WalletTxId `in_` valList is - - case notifChanM of - Nothing -> return () - Just notifChan -> do - ts' <- fmap (map entityVal) $ - splitSelect ids' $ \ts -> from $ \t -> do - where_ $ t ^. WalletTxId `in_` valList ts - return t - forM_ ts' $ \tx' -> do - let ai = walletTxAccount tx' - Account{..} <- - fromMaybe (error "Tyranossaurus Rex attacks") <$> get ai - liftIO $ atomically $ writeTBMChan notifChan $ - NotifTx $ toJsonTx accountName Nothing tx' - -{- Transaction creation and signing (local wallet functions) -} - --- | Create a transaction sending some coins to a list of recipient addresses. -createWalletTx - :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Entity Account -- ^ Account Entity - -> Maybe (TBMChan Notif) -- ^ Notification channel - -> Maybe XPrvKey -- ^ Key if not provided by account - -> [(Address,Word64)] -- ^ List of recipient addresses and amounts - -> Word64 -- ^ Fee per byte - -> Word32 -- ^ Minimum confirmations - -> Bool -- ^ Should fee be paid by recipient - -> Bool -- ^ Should the transaction be signed - -> SqlPersistT m (WalletTx, [WalletAddr]) - -- ^ (New transaction hash, Completed flag) -createWalletTx accE@(Entity ai acc) notifM masterM dests fee minConf rcptFee sign = do - -- Build an unsigned transaction from the given recipient values and fee - (unsignedTx, inCoins, newChangeAddrs) <- - buildUnsignedTx accE dests fee minConf rcptFee - -- Sign our new transaction if signing was requested - let dat = map toCoinSignData inCoins - tx | sign = signOfflineTx acc masterM unsignedTx dat - | otherwise = unsignedTx - -- Import the transaction in the wallet either as a network transaction if - -- it is complete, or as an offline transaction otherwise. - (res, newAddrs) <- importTx' tx notifM ai inCoins - case res of - (txRes:_) -> return (txRes, newAddrs ++ newChangeAddrs) - _ -> liftIO . 
throwIO $ WalletException - "Error while importing the new transaction" - -toCoinSignData :: InCoinData -> CoinSignData -toCoinSignData (InCoinData (Entity _ c) t x) = - CoinSignData (OutPoint (walletTxHash t) (walletCoinPos c)) - (walletCoinScript c) - deriv - where - deriv = Deriv :/ addrTypeIndex (walletAddrType x) :/ walletAddrIndex x - --- Build an unsigned transaction given a list of recipients and a fee. Returns --- the unsigned transaction together with the input coins that have been --- selected or spending. -buildUnsignedTx - :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Entity Account - -> [(Address, Word64)] - -> Word64 - -> Word32 - -> Bool - -> SqlPersistT m (Tx, [InCoinData], [WalletAddr]) - -- ^ Generated change addresses -buildUnsignedTx _ [] _ _ _ = liftIO . throwIO $ WalletException - "buildUnsignedTx: No transaction recipients have been provided" -buildUnsignedTx accE@(Entity ai acc) origDests origFee minConf rcptFee = do - let p = case accountType acc of - AccountMultisig m n -> (m, n) - _ -> throw . WalletException $ "Invalid account type" - fee = if rcptFee then 0 else origFee - nOut = length origDests + 1 -- + 1 for the change address - coins | isMultisigAccount acc = chooseMSCoins tot fee p nOut True - | otherwise = chooseCoins tot fee nOut True - -- TODO: Add more policies like confirmations or coin age - -- Sort coins by their values in descending order - orderPolicy c _ = [desc $ c ^. WalletCoinValue] - - -- Find the spendable coins in the given account with the required - -- number of minimum confirmations. - selectRes <- spendableCoins ai minConf orderPolicy - -- Find a selection of spendable coins that matches our target value - let (selected, change) = - either (throw . WalletException) id $ coins selectRes - totFee | isMultisigAccount acc = - guessMSTxFee origFee p nOut (length selected) - | otherwise = guessTxFee origFee nOut (length selected) - -- Subtract fees from first destination if rcptFee - value = snd $ head origDests - - -- First output must not be dust after deducting fees - when (rcptFee && value < totFee + 5430) $ throw $ WalletException - "First recipient cannot cover transaction fees" - - -- Subtract fees from first destination if rcptFee - let dests | rcptFee = - second (const $ value - totFee) (head origDests) : - tail origDests - | otherwise = origDests - - -- Make sure the first recipient has enough funds to cover the fee - when (snd (head dests) <= 0) $ throw $ - WalletException "Transaction fees too high" - - -- If the change amount is not dust, we need to add a change address to - -- our list of recipients. - -- TODO: Put the dust value in a constant somewhere. We also need a more - -- general way of detecting dust such as our transactions are not - -- rejected by full nodes. - (allDests, addrs) <- if change < 5430 - then return (dests, []) - else do - (addr, chng) <- newChangeAddr change - return ((walletAddrAddress addr, chng) : dests, [addr]) - - case buildAddrTx (map toOutPoint selected) $ map toBase58 allDests of - Right tx -> return (tx, selected, addrs) - Left err -> liftIO . 
throwIO $ WalletException err - where - tot = sum $ map snd origDests - toBase58 (a, v) = (addrToBase58 a, v) - toOutPoint (InCoinData (Entity _ c) t _) = - OutPoint (walletTxHash t) (walletCoinPos c) - newChangeAddr change = do - let lq = ListRequest 0 0 False - (as, _) <- unusedAddresses accE AddressInternal lq - case as of - (a:_) -> do - -- Use the address to prevent reusing it again - _ <- useAddress a - -- TODO: Randomize the change position - return (a, change) - _ -> liftIO . throwIO $ WalletException - "No unused addresses available" - -signAccountTx :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => Entity Account - -> Maybe (TBMChan Notif) - -> Maybe XPrvKey - -> TxHash - -> SqlPersistT m ([WalletTx], [WalletAddr]) -signAccountTx (Entity ai acc) notifChanM masterM txid = do - (OfflineTxData tx dat, inCoins) <- getOfflineTxData ai txid - let signedTx = signOfflineTx acc masterM tx dat - importTx' signedTx notifChanM ai inCoins - -getOfflineTxData - :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => AccountId - -> TxHash - -> SqlPersistT m (OfflineTxData, [InCoinData]) -getOfflineTxData ai txid = do - txM <- getBy $ UniqueAccTx ai txid - case txM of - Just (Entity _ tx) -> do - unless (walletTxConfidence tx == TxOffline) $ liftIO . throwIO $ - WalletException "Can only sign offline transactions." - inCoins <- getInCoins (walletTxTx tx) $ Just ai - return - ( OfflineTxData (walletTxTx tx) $ map toCoinSignData inCoins - , inCoins - ) - _ -> liftIO . throwIO $ WalletException $ unwords - [ "Invalid txid", cs $ txHashToHex txid ] - --- Sign a transaction using a list of CoinSignData. This allows an offline --- signer without access to the coins to sign a given transaction. -signOfflineTx :: Account -- ^ Account used for signing - -> Maybe XPrvKey -- ^ Key if not provided in account - -> Tx -- ^ Transaction to sign - -> [CoinSignData] -- ^ Input signing data - -> Tx -signOfflineTx acc masterM tx coinSignData - | null myMasters = throw $ WalletException "Invalid master key" - | otherwise = either (throw . WalletException) id $ - signTx tx sigData $ map (toPrvKeyG . xPrvKey) prvKeys - where - -- Compute all the SigInputs - sigData = map (toSigData acc) coinSignData - -- Compute all the private keys - prvKeys = concatMap toPrvKeys coinSignData - -- Build a SigInput from a CoinSignData - toSigData acc' (CoinSignData op so deriv) = - -- TODO: Here we override the SigHash to be SigAll False all the time. - -- Should we be more flexible? - SigInput so op (SigAll False) $ - if isMultisigAccount acc - then Just $ getPathRedeem acc' deriv - else Nothing - toPrvKeys (CoinSignData _ _ deriv) = map (derivePath deriv) myMasters - allMasters = case masterM of - Just m -> rootToAccKeys m $ accountKeys acc - _ -> [fromMaybe (throw err) (accountMaster acc)] - err = WalletException "No extended private key available" - myMasters = filter ((`elem` accountKeys acc) . deriveXPubKey) allMasters - --- Returns unspent coins that can be spent in an account that have a minimum --- number of confirmations. Coinbase coins can only be spent after 100 --- confirmations. 
-spendableCoins - :: (MonadIO m, MonadThrow m, MonadBase IO m, MonadResource m) - => AccountId -- ^ Account key - -> Word32 -- ^ Minimum confirmations - -> ( SqlExpr (Entity WalletCoin) - -> SqlExpr (Entity WalletTx) - -> [SqlExpr OrderBy] - ) - -- ^ Coin ordering policy - -> SqlPersistT m [InCoinData] -- ^ Spendable coins -spendableCoins ai minConf orderPolicy = - fmap (map f) $ select $ spendableCoinsFrom ai minConf orderPolicy - where - f (c, t, x) = InCoinData c (entityVal t) (entityVal x) - -spendableCoinsFrom - :: AccountId -- ^ Account key - -> Word32 -- ^ Minimum confirmations - -> ( SqlExpr (Entity WalletCoin) - -> SqlExpr (Entity WalletTx) - -> [SqlExpr OrderBy] - ) - -- ^ Coin ordering policy - -> SqlQuery ( SqlExpr (Entity WalletCoin) - , SqlExpr (Entity WalletTx) - , SqlExpr (Entity WalletAddr) - ) -spendableCoinsFrom ai minConf orderPolicy = - from $ \(c `InnerJoin` t `InnerJoin` x `LeftOuterJoin` s) -> do - -- Joins have to be set in reverse order ! - -- Left outer join on spent coins - on ( s ?. SpentCoinAccount ==. just (c ^. WalletCoinAccount) - &&. s ?. SpentCoinHash ==. just (c ^. WalletCoinHash) - &&. s ?. SpentCoinPos ==. just (c ^. WalletCoinPos) - ) - on $ x ^. WalletAddrId ==. c ^. WalletCoinAddr - -- Inner join on coins and transactions - on $ t ^. WalletTxId ==. c ^. WalletCoinTx - where_ ( c ^. WalletCoinAccount ==. val ai - &&. t ^. WalletTxConfidence - `in_` valList [ TxPending, TxBuilding ] - -- We only want unspent coins - &&. E.isNothing (s ?. SpentCoinId) - &&. limitConfirmations (Right t) minConf - ) - orderBy (orderPolicy c t) - return (c, t, x) - --- If the current height is 200 and a coin was confirmed at height 198, then it --- has 3 confirmations. So, if we require 3 confirmations, we want coins with a --- confirmed height of 198 or less (200 - 3 + 1). -limitConfirmations :: Either (SqlExpr (Maybe (Entity WalletTx))) - (SqlExpr (Entity WalletTx)) - -> Word32 - -> SqlExpr (Value Bool) -limitConfirmations txE minconf - | minconf == 0 = limitCoinbase - | minconf < 100 = limitConfs minconf &&. limitCoinbase - | otherwise = limitConfs minconf - where - limitConfs i = case txE of - Left t -> t ?. WalletTxConfirmedHeight - <=. just (just (selectHeight -. val (i - 1))) - Right t -> t ^. WalletTxConfirmedHeight - <=. just (selectHeight -. val (i - 1)) - -- Coinbase transactions require 100 confirmations - limitCoinbase = case txE of - Left t -> - not_ (coalesceDefault [t ?. WalletTxIsCoinbase] (val False)) ||. - limitConfs 100 - Right t -> - not_ (t ^. WalletTxIsCoinbase) ||. limitConfs 100 - selectHeight :: SqlExpr (Value Word32) - selectHeight = sub_select $ from $ \co -> do - limit 1 - return $ co ^. WalletStateHeight - -{- Balances -} - -accountBalance :: MonadIO m - => AccountId - -> Word32 - -> Bool - -> SqlPersistT m Word64 -accountBalance ai minconf offline = do - res <- select $ from $ \(c `InnerJoin` - t `LeftOuterJoin` s `LeftOuterJoin` st) -> do - on $ st ?. WalletTxId ==. s ?. SpentCoinSpendingTx - on ( s ?. SpentCoinAccount ==. just (c ^. WalletCoinAccount) - &&. s ?. SpentCoinHash ==. just (c ^. WalletCoinHash) - &&. s ?. SpentCoinPos ==. just (c ^. WalletCoinPos) - ) - on $ t ^. WalletTxId ==. c ^. WalletCoinTx - let unspent = E.isNothing ( s ?. SpentCoinId ) - spentOffline = st ?. WalletTxConfidence ==. just (val TxOffline) - cond = c ^. WalletCoinAccount ==. val ai - &&. t ^. WalletTxConfidence `in_` valList validConfidence - -- For non-offline balances, we have to take into account - -- the coins which are spent by offline transactions. 
- &&. if offline then unspent else unspent ||. spentOffline - where_ $ if minconf == 0 - then cond - else cond &&. limitConfirmations (Right t) minconf - return $ sum_ (c ^. WalletCoinValue) - case res of - (Value (Just s):_) -> return $ floor (s :: Double) - _ -> return 0 - where - validConfidence = TxPending : TxBuilding : [ TxOffline | offline ] - -addressBalances :: MonadIO m - => Entity Account - -> KeyIndex - -> KeyIndex - -> AddressType - -> Word32 - -> Bool - -> SqlPersistT m [(KeyIndex, BalanceInfo)] -addressBalances accE@(Entity ai _) iMin iMax addrType minconf offline = do - -- We keep our joins flat to improve performance in SQLite. - res <- select $ from $ \(x `LeftOuterJoin` c `LeftOuterJoin` - t `LeftOuterJoin` s `LeftOuterJoin` st) -> do - let joinCond = st ?. WalletTxId ==. s ?. SpentCoinSpendingTx - -- Do not join the spending information for offline transactions if we - -- request the online balances. This will count the coin as unspent. - on $ if offline - then joinCond - else joinCond &&. - st ?. WalletTxConfidence !=. just (val TxOffline) - on $ s ?. SpentCoinAccount ==. c ?. WalletCoinAccount - &&. s ?. SpentCoinHash ==. c ?. WalletCoinHash - &&. s ?. SpentCoinPos ==. c ?. WalletCoinPos - let txJoin = t ?. WalletTxId ==. c ?. WalletCoinTx - &&. t ?. WalletTxConfidence `in_` valList validConfidence - on $ if minconf == 0 - then txJoin - else txJoin &&. limitConfirmations (Left t) minconf - on $ c ?. WalletCoinAddr ==. just (x ^. WalletAddrId) - let limitIndex - | iMin == iMax = x ^. WalletAddrIndex ==. val iMin - | otherwise = x ^. WalletAddrIndex >=. val iMin - &&. x ^. WalletAddrIndex <=. val iMax - where_ ( x ^. WalletAddrAccount ==. val ai - &&. limitIndex - &&. x ^. WalletAddrIndex <. subSelectAddrCount accE addrType - &&. x ^. WalletAddrType ==. val addrType - ) - groupBy $ x ^. WalletAddrIndex - let unspent = E.isNothing $ st ?. WalletTxId - invalidTx = E.isNothing $ t ?. WalletTxId - return ( x ^. WalletAddrIndex -- Address index - , sum_ $ case_ - [ when_ invalidTx - then_ (val (Just 0)) - ] (else_ $ c ?. WalletCoinValue) -- Out value - , sum_ $ case_ - [ when_ (unspent ||. invalidTx) - then_ (val (Just 0)) - ] (else_ $ c ?. WalletCoinValue) -- Out value - , count $ t ?. WalletTxId -- New coins - , count $ case_ - [ when_ invalidTx - then_ (val Nothing) - ] (else_ $ st ?. WalletTxId) -- Spent coins - ) - return $ map f res - where - validConfidence = Just TxPending : Just TxBuilding : - [ Just TxOffline | offline ] - f (Value i, Value inM, Value outM, Value newC, Value spentC) = - let b = BalanceInfo - { balanceInfoInBalance = - floor $ fromMaybe (0 :: Double) inM - , balanceInfoOutBalance = - floor $ fromMaybe (0 :: Double) outM - , balanceInfoCoins = newC - , balanceInfoSpentCoins = spentC - } - in (i, b) - -{- Rescans -} - -resetRescan :: MonadIO m => SqlPersistT m () -resetRescan = do - P.deleteWhere ([] :: [P.Filter WalletCoin]) - P.deleteWhere ([] :: [P.Filter SpentCoin]) - P.deleteWhere ([] :: [P.Filter WalletTx]) - setBestBlock (headerHash genesisHeader) 0 - diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Types.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Types.hs deleted file mode 100644 index 3fa32c28..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Types.hs +++ /dev/null @@ -1,763 +0,0 @@ -{-# LANGUAGE DeriveGeneric #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} -module Network.Haskoin.Wallet.Types -( AccountName - --- JSON Types -, JsonAccount(..) -, JsonAddr(..) -, JsonCoin(..) -, JsonTx(..) - --- Request Types -, WalletRequest(..) 
-, ListRequest(..) -, NewAccount(..) -, OfflineTxData(..) -, CoinSignData(..) -, CreateTx(..) -, SignTx(..) -, NodeAction(..) -, AccountType(..) -, AddressType(..) -, addrTypeIndex -, TxType(..) -, TxConfidence(..) -, AddressInfo(..) -, BalanceInfo(..) - --- Response Types -, WalletResponse(..) -, TxCompleteRes(..) -, ListResult(..) -, RescanRes(..) -, JsonSyncBlock(..) -, JsonBlock(..) -, Notif(..) -, BlockInfo(..) - --- Helper Types -, WalletException(..) -, BTCNode(..) - --- *Helpers -, splitSelect -, splitUpdate -, splitDelete -, splitInsertMany_ -, join2 -, limitOffset -) where - -import Control.DeepSeq (NFData (..)) -import Control.Exception (Exception) -import Control.Monad -import Control.Monad.Trans (MonadIO) -import Data.Aeson -import Data.Aeson.TH -import qualified Data.ByteString as BS -import Data.Char (toLower) -import Data.Int (Int64) -import Data.List.Split (chunksOf) -import Data.Maybe (maybeToList) -import qualified Data.Serialize as S -import Data.String (fromString) -import Data.String.Conversions (cs) -import Data.Text (Text) -import Data.Time (UTCTime) -import Data.Typeable (Typeable) -import Data.Word (Word32, Word64, Word8) -import qualified Database.Esqueleto as E -import Database.Esqueleto.Internal.Sql (SqlSelect) -import qualified Database.Persist as P -import Database.Persist.Class -import Database.Persist.Sql -import GHC.Generics -import Network.Haskoin.Block -import Network.Haskoin.Crypto -import Network.Haskoin.Network -import Network.Haskoin.Script -import Network.Haskoin.Transaction -import Network.Haskoin.Util -import Network.Haskoin.Wallet.Database -import Network.Haskoin.Wallet.Types.BlockInfo - -type AccountName = Text - --- TODO: Add NFData instances for all those types - -{- Request Types -} - -data TxType - = TxIncoming - | TxOutgoing - | TxSelf - deriving (Eq, Show, Read) - -instance NFData TxType where - rnf x = x `seq` () - -$(deriveJSON (dropSumLabels 2 0 "") ''TxType) - -data TxConfidence - = TxOffline - | TxDead - | TxPending - | TxBuilding - deriving (Eq, Show, Read) - -instance NFData TxConfidence where - rnf x = x `seq` () - -$(deriveJSON (dropSumLabels 2 0 "") ''TxConfidence) - -data AddressInfo = AddressInfo - { addressInfoAddress :: !Address - , addressInfoValue :: !(Maybe Word64) - , addressInfoIsLocal :: !Bool - } - deriving (Eq, Show, Read, Generic) - -instance S.Serialize AddressInfo - -$(deriveJSON (dropFieldLabel 11) ''AddressInfo) - -instance NFData AddressInfo where - rnf AddressInfo{..} = - rnf addressInfoAddress `seq` - rnf addressInfoValue `seq` - rnf addressInfoIsLocal - -data BalanceInfo = BalanceInfo - { balanceInfoInBalance :: !Word64 - , balanceInfoOutBalance :: !Word64 - , balanceInfoCoins :: !Int - , balanceInfoSpentCoins :: !Int - } - deriving (Eq, Show, Read) - -$(deriveJSON (dropFieldLabel 11) ''BalanceInfo) - -instance NFData BalanceInfo where - rnf BalanceInfo{..} = - rnf balanceInfoInBalance `seq` - rnf balanceInfoOutBalance `seq` - rnf balanceInfoCoins `seq` - rnf balanceInfoSpentCoins - -data AccountType - = AccountRegular - | AccountMultisig - { accountTypeRequiredSigs :: !Int - , accountTypeTotalKeys :: !Int - } - deriving (Eq, Show, Read) - -instance NFData AccountType where - rnf t = case t of - AccountRegular -> () - AccountMultisig m n -> rnf m `seq` rnf n - -instance ToJSON AccountType where - toJSON accType = case accType of - AccountRegular -> object - [ "type" .= String "regular" ] - AccountMultisig m n -> object - [ "type" .= String "multisig" - , "requiredsigs" .= m - , "totalkeys" .= n - ] - 
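For reference, the instance above (together with the FromJSON instance that follows) encodes AccountType as a tagged JSON object. The snippet below is a minimal illustrative sketch, not part of the original module: it assumes the aeson package and this module's AccountType (with the instances shown here) are in scope, and accountTypeJsonDemo is a hypothetical helper name.

    import Data.Aeson (decode, encode)
    import qualified Data.ByteString.Lazy.Char8 as L8

    -- Expected JSON shapes (object key order may vary):
    --   AccountRegular      ~ {"type":"regular"}
    --   AccountMultisig 2 3 ~ {"type":"multisig","requiredsigs":2,"totalkeys":3}
    accountTypeJsonDemo :: IO ()
    accountTypeJsonDemo = do
        L8.putStrLn (encode AccountRegular)
        L8.putStrLn (encode (AccountMultisig 2 3))
        -- Round trip through the FromJSON instance defined just below:
        print (decode (encode (AccountMultisig 2 3)) :: Maybe AccountType)
        -- Just (AccountMultisig {accountTypeRequiredSigs = 2, accountTypeTotalKeys = 3})
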
-instance FromJSON AccountType where - parseJSON = withObject "AccountType" $ \o -> - o .: "type" >>= \t -> case (t :: Text) of - "regular" -> return AccountRegular - "multisig" -> AccountMultisig <$> o .: "requiredsigs" - <*> o .: "totalkeys" - _ -> mzero - -data NewAccount = NewAccount - { newAccountName :: !AccountName - , newAccountType :: !AccountType - , newAccountMnemonic :: !(Maybe Text) - , newAccountPassword :: !(Maybe Text) - , newAccountEntropy :: !(Maybe Word8) - , newAccountMaster :: !(Maybe XPrvKey) - , newAccountDeriv :: !(Maybe KeyIndex) - , newAccountKeys :: ![XPubKey] - , newAccountReadOnly :: !Bool - } - deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 10) ''NewAccount) - -data ListRequest = ListRequest - { listOffset :: !Word32 - , listLimit :: !Word32 - , listReverse :: !Bool - } - deriving (Eq, Show, Read) - -$(deriveJSON (dropFieldLabel 4) ''ListRequest) - -data CoinSignData = CoinSignData - { coinSignOutPoint :: !OutPoint - , coinSignScriptOutput :: !ScriptOutput - , coinSignDeriv :: !SoftPath - } - deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 8) ''CoinSignData) - -instance S.Serialize CoinSignData where - get = do - p <- S.get - s <- readBS >>= \bs -> case decodeOutputBS bs of - Right s -> return s - _ -> error "Invalid ScriptOutput in CoinSignData" - S.get >>= \dM -> case toSoft (dM :: DerivPath) of - Just d -> return $ CoinSignData p s d - _ -> error "Invalid derivation in CoinSignData" - where - readBS = S.get >>= \(VarInt l) -> S.getByteString $ fromIntegral l - - put (CoinSignData p s d) = do - S.put p - writeBS $ encodeOutputBS s - S.put $ toGeneric d - where - writeBS bs = do - S.put $ VarInt $ fromIntegral $ BS.length bs - S.putByteString bs - -data OfflineTxData = OfflineTxData - { offlineTxDataTx :: !Tx - , offlineTxDataCoinData :: ![CoinSignData] - } - -$(deriveJSON (dropFieldLabel 13) ''OfflineTxData) - -instance S.Serialize OfflineTxData where - get = OfflineTxData <$> S.get <*> (replicateList =<< S.get) - where - replicateList (VarInt c) = replicateM (fromIntegral c) S.get - - put (OfflineTxData t ds) = do - S.put t - S.put $ VarInt $ fromIntegral $ length ds - forM_ ds S.put - -data CreateTx = CreateTx - { createTxRecipients :: ![(Address, Word64)] - , createTxFee :: !Word64 - , createTxMinConf :: !Word32 - , createTxRcptFee :: !Bool - , createTxSign :: !Bool - , createTxSignKey :: !(Maybe XPrvKey) - } deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 8) ''CreateTx) - -data SignTx = SignTx - { signTxTxHash :: !TxHash - , signTxSignKey :: !(Maybe XPrvKey) - } deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 6) ''SignTx) - -data NodeAction - = NodeActionRescan { nodeActionTimestamp :: !(Maybe Word32) } - | NodeActionStatus - deriving (Eq, Show, Read) - -instance ToJSON NodeAction where - toJSON na = case na of - NodeActionRescan tM -> object $ - ("type" .= String "rescan") : (("timestamp" .=) <$> maybeToList tM) - NodeActionStatus -> object [ "type" .= String "status" ] - -instance FromJSON NodeAction where - parseJSON = withObject "NodeAction" $ \o -> do - String t <- o .: "type" - case t of - "rescan" -> NodeActionRescan <$> o .:? 
"timestamp" - "status" -> return NodeActionStatus - _ -> mzero - -data AddressType - = AddressInternal - | AddressExternal - deriving (Eq, Show, Read) - -$(deriveJSON (dropSumLabels 7 0 "") ''AddressType) - -instance NFData AddressType where - rnf x = x `seq` () - -addrTypeIndex :: AddressType -> KeyIndex -addrTypeIndex AddressExternal = 0 -addrTypeIndex AddressInternal = 1 - -data WalletRequest - = AccountReq !AccountName - | AccountsReq !ListRequest - | NewAccountReq !NewAccount - | RenameAccountReq !AccountName !AccountName - | AddPubKeysReq !AccountName ![XPubKey] - | SetAccountGapReq !AccountName !Word32 - | AddrsReq !AccountName !AddressType !Word32 !Bool !ListRequest - | UnusedAddrsReq !AccountName !AddressType !ListRequest - | AddressReq !AccountName !KeyIndex !AddressType !Word32 !Bool - | PubKeyIndexReq !AccountName !PubKeyC !AddressType - | SetAddrLabelReq !AccountName !KeyIndex !AddressType !Text - | GenerateAddrsReq !AccountName !KeyIndex !AddressType - | TxsReq !AccountName !ListRequest - | PendingTxsReq !AccountName !ListRequest - | DeadTxsReq !AccountName !ListRequest - | AddrTxsReq !AccountName !KeyIndex !AddressType !ListRequest - | CreateTxReq !AccountName !CreateTx - | ImportTxReq !AccountName !Tx - | SignTxReq !AccountName !SignTx - | TxReq !AccountName !TxHash - | DeleteTxReq !TxHash - | OfflineTxReq !AccountName !TxHash - | SignOfflineTxReq !AccountName !(Maybe XPrvKey) !Tx ![CoinSignData] - | BalanceReq !AccountName !Word32 !Bool - | NodeActionReq !NodeAction - | SyncReq !AccountName !BlockHash !ListRequest - | SyncHeightReq !AccountName !BlockHeight !ListRequest - | BlockInfoReq ![BlockHash] - | StopServerReq - deriving (Show, Eq) - --- TODO: Set omitEmptyContents on aeson-0.9 -$(deriveJSON - defaultOptions - { constructorTagModifier = map toLower . 
init - , sumEncoding = defaultTaggedObject - { tagFieldName = "method" - , contentsFieldName = "request" - } - } - ''WalletRequest - ) - -{- JSON Types -} - -data JsonAccount = JsonAccount - { jsonAccountName :: !Text - , jsonAccountType :: !AccountType - , jsonAccountMaster :: !(Maybe XPrvKey) - , jsonAccountMnemonic :: !(Maybe Text) - , jsonAccountKeys :: ![XPubKey] - , jsonAccountGap :: !Word32 - , jsonAccountCreated :: !UTCTime - } - deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 11) ''JsonAccount) - -data JsonAddr = JsonAddr - { jsonAddrAddress :: !Address - , jsonAddrIndex :: !KeyIndex - , jsonAddrType :: !AddressType - , jsonAddrLabel :: !Text - , jsonAddrRedeem :: !(Maybe ScriptOutput) - , jsonAddrKey :: !(Maybe PubKeyC) - , jsonAddrCreated :: !UTCTime - -- Optional Balance - , jsonAddrBalance :: !(Maybe BalanceInfo) - } - deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 8) ''JsonAddr) - -data JsonTx = JsonTx - { jsonTxHash :: !TxHash - , jsonTxNosigHash :: !TxHash - , jsonTxType :: !TxType - , jsonTxInValue :: !Word64 - , jsonTxOutValue :: !Word64 - , jsonTxValue :: !Int64 - , jsonTxInputs :: ![AddressInfo] - , jsonTxOutputs :: ![AddressInfo] - , jsonTxChange :: ![AddressInfo] - , jsonTxTx :: !Tx - , jsonTxIsCoinbase :: !Bool - , jsonTxConfidence :: !TxConfidence - , jsonTxConfirmedBy :: !(Maybe BlockHash) - , jsonTxConfirmedHeight :: !(Maybe Word32) - , jsonTxConfirmedDate :: !(Maybe Word32) - , jsonTxCreated :: !UTCTime - , jsonTxAccount :: !AccountName - -- Optional confirmation - , jsonTxConfirmations :: !(Maybe Word32) - , jsonTxBestBlock :: !(Maybe BlockHash) - , jsonTxBestBlockHeight :: !(Maybe BlockHeight) - } - deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 6) ''JsonTx) - -data JsonCoin = JsonCoin - { jsonCoinHash :: !TxHash - , jsonCoinPos :: !Word32 - , jsonCoinValue :: !Word64 - , jsonCoinScript :: !ScriptOutput - , jsonCoinCreated :: !UTCTime - -- Optional Tx - , jsonCoinTx :: !(Maybe JsonTx) - -- Optional Address - , jsonCoinAddress :: !(Maybe JsonAddr) - -- Optional spender - , jsonCoinSpendingTx :: !(Maybe JsonTx) - } - deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 8) ''JsonCoin) - -{- Response Types -} - -data TxCompleteRes = TxCompleteRes - { txCompleteTx :: !Tx - , txCompleteComplete :: !Bool - } deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 10) ''TxCompleteRes) - -data ListResult a = ListResult - { listResultItems :: ![a] - , listResultTotal :: !Word32 - } - -$(deriveJSON (dropFieldLabel 10) ''ListResult) - -data RescanRes = RescanRes { rescanTimestamp :: !Word32 } - deriving (Eq, Show, Read) - -$(deriveJSON (dropFieldLabel 6) ''RescanRes) - -data WalletResponse a - = ResponseError { responseError :: !Text } - | ResponseValid { responseResult :: !(Maybe a) } - deriving (Eq, Show) - -$(deriveJSON (dropSumLabels 8 8 "status") ''WalletResponse) - -data JsonSyncBlock = JsonSyncBlock - { jsonSyncBlockHash :: !BlockHash - , jsonSyncBlockHeight :: !BlockHeight - , jsonSyncBlockPrev :: !BlockHash - , jsonSyncBlockTxs :: ![JsonTx] - } deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 13) ''JsonSyncBlock) - -data JsonBlock = JsonBlock - { jsonBlockHash :: !BlockHash - , jsonBlockHeight :: !BlockHeight - , jsonBlockPrev :: !BlockHash - } deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 9) ''JsonBlock) - -data Notif - = NotifBlock !JsonBlock - | NotifTx !JsonTx - deriving (Eq, Show) - -$(deriveJSON (dropSumLabels 5 5 "type") ''Notif) - -{- Helper Types -} - -data WalletException = WalletException !String - deriving (Eq, Read, Show, Typeable) - 
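WalletException is the module's single error type: a String wrapped in an Exception instance (declared just below), so wallet errors raised with throw/throwIO/throwM can be caught with the standard Control.Exception machinery. A minimal sketch under that assumption; tryWallet is a hypothetical helper, not part of the module.

    import Control.Exception (throwIO, try)

    -- Run a wallet action and surface any WalletException as a Left value,
    -- relying on the Exception instance declared just below.
    tryWallet :: IO a -> IO (Either WalletException a)
    tryWallet = try

    -- e.g. tryWallet (throwIO (WalletException "no such account"))
    --      ==> Left (WalletException "no such account")
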
-instance Exception WalletException - -data BTCNode = BTCNode { btcNodeHost :: !String, btcNodePort :: !Int } - deriving (Eq, Read, Show) - -$(deriveJSON (dropFieldLabel 7) ''BTCNode) - -{- Persistent Instances -} - -instance PersistField XPrvKey where - toPersistValue = PersistText . cs . xPrvExport - fromPersistValue (PersistText txt) = - maybeToEither "Invalid Persistent XPrvKey" $ xPrvImport $ cs txt - fromPersistValue (PersistByteString bs) = - maybeToEither "Invalid Persistent XPrvKey" $ xPrvImport bs - fromPersistValue _ = Left "Invalid Persistent XPrvKey" - -instance PersistFieldSql XPrvKey where - sqlType _ = SqlString - -instance PersistField [XPubKey] where - toPersistValue = PersistText . cs . encode - fromPersistValue (PersistText txt) = - maybeToEither "Invalid Persistent XPubKey" $ decodeStrict' $ cs txt - fromPersistValue (PersistByteString bs) = - maybeToEither "Invalid Persistent XPubKey" $ decodeStrict' bs - fromPersistValue _ = Left "Invalid Persistent XPubKey" - -instance PersistFieldSql [XPubKey] where - sqlType _ = SqlString - -instance PersistField DerivPath where - toPersistValue = PersistText . cs . pathToStr - fromPersistValue (PersistText txt) = maybeToEither - "Invalid Persistent DerivPath" . fmap getParsedPath . - parsePath . cs $ txt - fromPersistValue (PersistByteString bs) = maybeToEither - "Invalid Persistent DerivPath" . fmap getParsedPath . - parsePath . cs $ bs - fromPersistValue _ = Left "Invalid Persistent DerivPath" - -instance PersistFieldSql DerivPath where - sqlType _ = SqlString - -instance PersistField HardPath where - toPersistValue = PersistText . cs . pathToStr - fromPersistValue (PersistText txt) = maybeToEither - "Invalid Persistent HardPath" $ parseHard $ cs txt - fromPersistValue (PersistByteString bs) = maybeToEither - "Invalid Persistent HardPath" $ parseHard $ cs bs - fromPersistValue _ = Left "Invalid Persistent HardPath" - -instance PersistFieldSql HardPath where - sqlType _ = SqlString - -instance PersistField SoftPath where - toPersistValue = PersistText . cs . pathToStr - fromPersistValue (PersistText txt) = maybeToEither - "Invalid Persistent SoftPath" $ parseSoft $ cs txt - fromPersistValue (PersistByteString bs) = maybeToEither - "Invalid Persistent SoftPath" $ parseSoft $ cs bs - fromPersistValue _ = Left "Invalid Persistent SoftPath" - -instance PersistFieldSql SoftPath where - sqlType _ = SqlString - -instance PersistField AccountType where - toPersistValue = PersistText . cs . 
encode - fromPersistValue (PersistText txt) = maybeToEither - "Invalid Persistent AccountType" $ decodeStrict' $ cs txt - fromPersistValue (PersistByteString bs) = maybeToEither - "Invalid Persistent AccountType" $ decodeStrict' bs - fromPersistValue _ = Left "Invalid Persistent AccountType" - -instance PersistFieldSql AccountType where - sqlType _ = SqlString - -instance PersistField AddressType where - toPersistValue ts = PersistBool $ case ts of - AddressExternal -> True - AddressInternal -> False - - fromPersistValue (PersistBool b) = return $ - if b then AddressExternal else AddressInternal - - fromPersistValue (PersistInt64 i) = return $ case i of - 0 -> AddressInternal - _ -> AddressExternal - - fromPersistValue _ = Left "Invalid Persistent AddressType" - -instance PersistFieldSql AddressType where - sqlType _ = SqlBool - -instance PersistField TxType where - toPersistValue ts = PersistText $ case ts of - TxIncoming -> "incoming" - TxOutgoing -> "outgoing" - TxSelf -> "self" - - fromPersistValue (PersistText txt) = case txt of - "incoming" -> return TxIncoming - "outgoing" -> return TxOutgoing - "self" -> return TxSelf - _ -> Left "Invalid Persistent TxType" - - fromPersistValue (PersistByteString bs) = case bs of - "incoming" -> return TxIncoming - "outgoing" -> return TxOutgoing - "self" -> return TxSelf - _ -> Left "Invalid Persistent TxType" - - fromPersistValue _ = Left "Invalid Persistent TxType" - -instance PersistFieldSql TxType where - sqlType _ = SqlString - -instance PersistField Address where - toPersistValue = PersistText . cs . addrToBase58 - fromPersistValue (PersistText a) = - maybeToEither "Invalid Persistent Address" $ base58ToAddr $ cs a - fromPersistValue (PersistByteString a) = - maybeToEither "Invalid Persistent Address" $ base58ToAddr a - fromPersistValue _ = Left "Invalid Persistent Address" - -instance PersistFieldSql Address where - sqlType _ = SqlString - -instance PersistField BloomFilter where - toPersistValue = PersistByteString . S.encode - fromPersistValue (PersistByteString bs) = - case S.decode bs of - Right x -> Right x - Left e -> Left (fromString e) - fromPersistValue _ = Left "Invalid Persistent BloomFilter" - -instance PersistFieldSql BloomFilter where - sqlType _ = SqlBlob - -instance PersistField BlockHash where - toPersistValue = PersistText . cs . blockHashToHex - fromPersistValue (PersistText h) = - maybeToEither "Could not decode BlockHash" $ hexToBlockHash $ cs h - fromPersistValue (PersistByteString h) = - maybeToEither "Could not decode BlockHash" $ hexToBlockHash h - fromPersistValue _ = Left "Invalid Persistent BlockHash" - -instance PersistFieldSql BlockHash where - sqlType _ = SqlString - -instance PersistField TxHash where - toPersistValue = PersistText . cs . 
txHashToHex - fromPersistValue (PersistText h) = - maybeToEither "Invalid Persistent TxHash" $ hexToTxHash $ cs h - fromPersistValue (PersistByteString h) = - maybeToEither "Invalid Persistent TxHash" $ hexToTxHash h - fromPersistValue _ = Left "Invalid Persistent TxHash" - -instance PersistFieldSql TxHash where - sqlType _ = SqlString - -instance PersistField TxConfidence where - toPersistValue tc = PersistText $ case tc of - TxOffline -> "offline" - TxDead -> "dead" - TxPending -> "pending" - TxBuilding -> "building" - - fromPersistValue (PersistText txt) = case txt of - "offline" -> return TxOffline - "dead" -> return TxDead - "pending" -> return TxPending - "building" -> return TxBuilding - _ -> Left "Invalid Persistent TxConfidence" - - fromPersistValue (PersistByteString bs) = case bs of - "offline" -> return TxOffline - "dead" -> return TxDead - "pending" -> return TxPending - "building" -> return TxBuilding - _ -> Left "Invalid Persistent TxConfidence" - - fromPersistValue _ = Left "Invalid Persistent TxConfidence" - -instance PersistFieldSql TxConfidence where - sqlType _ = SqlString - -instance PersistField Tx where - toPersistValue = PersistByteString . S.encode - fromPersistValue (PersistByteString bs) = - case S.decode bs of - Right x -> Right x - Left e -> Left (fromString e) - fromPersistValue _ = Left "Invalid Persistent Tx" - -instance PersistFieldSql Tx where - sqlType _ = SqlOther "MEDIUMBLOB" - -instance PersistField PubKeyC where - toPersistValue = PersistText . cs . encodeHex . S.encode - fromPersistValue (PersistText txt) = - case hex >>= S.decode of - Right x -> Right x - Left e -> Left (fromString e) - where - hex = maybeToEither "Could not decode hex" (decodeHex (cs txt)) - fromPersistValue (PersistByteString bs) = - case hex >>= S.decode of - Right x -> Right x - Left e -> Left (fromString e) - where - hex = maybeToEither "Could not decode hex" (decodeHex bs) - fromPersistValue _ = Left "Invalid Persistent PubKeyC" - -instance PersistFieldSql PubKeyC where - sqlType _ = SqlString - -instance PersistField ScriptOutput where - toPersistValue = PersistByteString . encodeOutputBS - fromPersistValue (PersistByteString bs) = - case decodeOutputBS bs of - Right x -> Right x - Left e -> Left (fromString e) - fromPersistValue _ = Left "Invalid Persistent ScriptOutput" - -instance PersistFieldSql ScriptOutput where - sqlType _ = SqlBlob - -instance PersistField [AddressInfo] where - toPersistValue = PersistByteString . S.encode - fromPersistValue (PersistByteString bs) = - case S.decode bs of - Right x -> Right x - Left e -> Left (fromString e) - fromPersistValue _ = Left "Invalid Persistent AddressInfo" - -instance PersistFieldSql [AddressInfo] where - sqlType _ = SqlOther "MEDIUMBLOB" - -{- Helpers -} - --- Join AND expressions with OR conditions in a binary way -join2 :: [E.SqlExpr (E.Value Bool)] -> E.SqlExpr (E.Value Bool) -join2 xs = case xs of - [] -> E.val False - [x] -> x - _ -> let (ls,rs) = splitAt (length xs `div` 2) xs - in join2 ls E.||. join2 rs - -splitSelect :: (SqlSelect a r, MonadIO m) - => [t] - -> ([t] -> E.SqlQuery a) - -> E.SqlPersistT m [r] -splitSelect ts queryF = - fmap concat $ forM vals $ E.select . queryF - where - vals = chunksOf paramLimit ts - -splitUpdate :: ( MonadIO m - , P.PersistEntity val - , P.PersistEntityBackend val ~ E.SqlBackend - ) - => [t] - -> ([t] -> E.SqlExpr (E.Entity val) -> E.SqlQuery ()) - -> E.SqlPersistT m () -splitUpdate ts updateF = - forM_ vals $ E.update . 
updateF - where - vals = chunksOf paramLimit ts - -splitDelete :: MonadIO m => [t] -> ([t] -> E.SqlQuery ()) -> E.SqlPersistT m () -splitDelete ts deleteF = - forM_ vals $ E.delete . deleteF - where - vals = chunksOf paramLimit ts - -splitInsertMany_ :: ( MonadIO m - , P.PersistEntity val - , P.PersistEntityBackend val ~ E.SqlBackend - ) - => [val] -> E.SqlPersistT m () -splitInsertMany_ = mapM_ P.insertMany_ . chunksOf paramLimit - -limitOffset :: Word32 -> Word32 -> E.SqlQuery () -limitOffset l o = do - when (l > 0) $ E.limit $ fromIntegral l - when (o > 0) $ E.offset $ fromIntegral o - diff --git a/haskoin-wallet/src/Network/Haskoin/Wallet/Types/BlockInfo.hs b/haskoin-wallet/src/Network/Haskoin/Wallet/Types/BlockInfo.hs deleted file mode 100644 index a819f66c..00000000 --- a/haskoin-wallet/src/Network/Haskoin/Wallet/Types/BlockInfo.hs +++ /dev/null @@ -1,77 +0,0 @@ -{-# LANGUAGE TemplateHaskell #-} -module Network.Haskoin.Wallet.Types.BlockInfo -( - BlockInfo(..) -, JsonHash256 -, fromNodeBlock -) -where - -import Data.String.Conversions (cs, ConvertibleStrings(..)) -import Data.Maybe (fromMaybe) -import Data.Aeson.TH (deriveJSON) -import Data.Word (Word32) -import Data.Time (UTCTime) -import Data.ByteString (ByteString) -import Data.Time.Clock.POSIX (posixSecondsToUTCTime) -import Data.Aeson (FromJSON, ToJSON, Value (String), - parseJSON, toJSON, withText) - -import Network.Haskoin.Block -import Network.Haskoin.Node.HeaderTree -import Network.Haskoin.Crypto -import Network.Haskoin.Util - - -newtype JsonHash256 = JsonHash256 { jsonGetHash256 :: Hash256 } - deriving (Eq, Show) - -jsonHashToHex :: JsonHash256 -> ByteString -jsonHashToHex = blockHashToHex . BlockHash . jsonGetHash256 - -instance ToJSON JsonHash256 where - toJSON = String . cs . jsonHashToHex - -instance FromJSON JsonHash256 where - parseJSON = withText "JsonHash256" $ - return . JsonHash256 . getBlockHash <$> - fromMaybe (error "Invalid 256 bit hash") . - hexToBlockHash . cs - -instance ConvertibleStrings JsonHash256 String where - convertString = cs . jsonHashToHex - -data BlockInfo = BlockInfo - { blockInfoHash :: !BlockHash - , blockInfoHeight :: !BlockHeight - , blockInfoVersion :: !Word32 - , blockInfoTimestamp :: !UTCTime - , blockInfoPrevBlock :: !BlockHash - , blockInfoMerkleRoot :: !JsonHash256 - , blockInfoBits :: !Word32 - , blockInfoNonce :: !Word32 - , blockInfoChain :: !Word32 - , blockInfoChainWork :: !Double - } deriving (Eq, Show) - -$(deriveJSON (dropFieldLabel 9) ''BlockInfo) - -fromNodeBlock :: NodeBlock -> BlockInfo -fromNodeBlock nb = - BlockInfo - { blockInfoHash = headerHash header - , blockInfoVersion = blockVersion header - , blockInfoPrevBlock = prevBlock header - , blockInfoNonce = bhNonce header - , blockInfoBits = blockBits header - , blockInfoMerkleRoot = JsonHash256 $ merkleRoot header - , blockInfoTimestamp = utcTimestamp - , blockInfoChainWork = nWork nb - , blockInfoHeight = nHeight nb - , blockInfoChain = nodeChain nb - } - where - header = nHeader nb - utcTimestamp = posixSecondsToUTCTime . realToFrac . 
- blockTimestamp $ header - diff --git a/haskoin-wallet/test/Main.hs b/haskoin-wallet/test/Main.hs deleted file mode 100644 index 6974ef2a..00000000 --- a/haskoin-wallet/test/Main.hs +++ /dev/null @@ -1,15 +0,0 @@ -module Main where - -import Test.Framework (defaultMain) - -import qualified Network.Haskoin.Wallet.Units (tests) -import qualified Network.Haskoin.Wallet.Tests (tests) - -import Network.Haskoin.Constants - -main :: IO () -main = setProdnet >> defaultMain - ( Network.Haskoin.Wallet.Tests.tests - ++ Network.Haskoin.Wallet.Units.tests - ) - diff --git a/haskoin-wallet/test/Network/Haskoin/Wallet/Arbitrary.hs b/haskoin-wallet/test/Network/Haskoin/Wallet/Arbitrary.hs deleted file mode 100644 index 0c88f20b..00000000 --- a/haskoin-wallet/test/Network/Haskoin/Wallet/Arbitrary.hs +++ /dev/null @@ -1,21 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-orphans #-} -module Network.Haskoin.Wallet.Arbitrary where - -import Test.QuickCheck (Arbitrary, arbitrary, oneof) - -import Network.Haskoin.Test -import Network.Haskoin.Wallet - -instance Arbitrary AccountType where - arbitrary = oneof - [ return AccountRegular - , do - (m, n) <- arbitraryMSParam - return $ AccountMultisig m n - ] - -instance Arbitrary NodeAction where - arbitrary = oneof [ NodeActionRescan <$> arbitrary - , return NodeActionStatus - ] - diff --git a/haskoin-wallet/test/Network/Haskoin/Wallet/Tests.hs b/haskoin-wallet/test/Network/Haskoin/Wallet/Tests.hs deleted file mode 100644 index a292f5ee..00000000 --- a/haskoin-wallet/test/Network/Haskoin/Wallet/Tests.hs +++ /dev/null @@ -1,23 +0,0 @@ -module Network.Haskoin.Wallet.Tests (tests) where - -import Test.Framework (Test, testGroup) -import Test.Framework.Providers.QuickCheck2 (testProperty) - -import Data.Aeson (FromJSON, ToJSON, encode, decode) -import Data.HashMap.Strict (singleton) - -import Network.Haskoin.Wallet.Arbitrary () -import Network.Haskoin.Wallet - -tests :: [Test] -tests = - [ testGroup "Serialize & de-serialize types to JSON" - [ testProperty "AccountType" (metaID :: AccountType -> Bool) - , testProperty "NodeAction" (metaID :: NodeAction -> Bool) - ] - ] - -metaID :: (FromJSON a, ToJSON a, Eq a) => a -> Bool -metaID x = (decode . 
encode) (singleton ("object" :: String) x) == - Just (singleton ("object" :: String) x) - diff --git a/haskoin-wallet/test/Network/Haskoin/Wallet/Units.hs b/haskoin-wallet/test/Network/Haskoin/Wallet/Units.hs deleted file mode 100644 index af1e01c8..00000000 --- a/haskoin-wallet/test/Network/Haskoin/Wallet/Units.hs +++ /dev/null @@ -1,1736 +0,0 @@ -{-# LANGUAGE RecordWildCards #-} -module Network.Haskoin.Wallet.Units (tests) where - -import Control.Concurrent.STM -import Control.Concurrent.STM.TBMChan -import Control.Exception (Exception, handleJust) -import Control.Monad (guard) -import Control.Monad.Logger (NoLoggingT) -import Control.Monad.Trans (liftIO) -import Control.Monad.Trans.Resource (ResourceT) -import qualified Data.ByteString as BS -import Data.List (sort) -import Data.Maybe (fromJust, isJust) -import Data.String.Conversions (cs) -import Data.Word (Word32, Word64) -import Database.Persist (Entity (..), entityVal, - getBy) -import Database.Persist.Sqlite (SqlPersistT, - runMigrationSilent, - runSqlite) -import Network.Haskoin.Block -import Network.Haskoin.Crypto -import Network.Haskoin.Node.HeaderTree -import Network.Haskoin.Script -import Network.Haskoin.Transaction -import Network.Haskoin.Wallet.Internals -import Test.Framework (Test, testGroup) -import Test.Framework.Providers.HUnit (testCase) -import Test.HUnit (Assertion, assertBool, - assertEqual, assertFailure) - -import Network.Haskoin.Util - -type App = SqlPersistT (NoLoggingT (ResourceT IO)) - -xPrv1 :: XPrvKey -xPrv1 = "xprv9s21ZrQH143K4a5123LatJaWPdyMvCG4Phpb79kLUXNF3Y9U537QUeKzUjkrdoZVVse747ZnNNUGryPZXEoMFjkuUKyWpEMcg7jbxYECE2b" - --- Bip44 account derivation 0 of xPrv1 -xPub1 :: XPubKey -xPub1 = "xpub6CsAAZAfnTnNFdmJu6St12N1vHHCPpG8uunWsTwc5PPnB4tk4k99mrgGoRBbt48SarGnLJZf5uwqGBtaBnQzVBtoA6aqtZAsx1xYvFCXM6H" - --- TODO: Add tests for accounts with no private key -tests :: [Test] -tests = - [ testGroup "Account tests" - [ testCase "Fail create account with wrong keys" $ - assertException - (WalletException "Invalid account keys") - ( newAccount NewAccount - { newAccountName = "fail-this" - , newAccountType = AccountRegular - -- This key does not correspond to the one below - , newAccountMaster = Just "xprv9s21ZrQH143K33Ezpb81k5upGyhrVcwgqNzHRHnQ2kGBPHkJ3sLPjGwj4LML1kr1bLfguJiY21XrYfVrL1CGurfVoMKSPwRdmzt1LwBtVyR" - , newAccountDeriv = Just 0 - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - ) - - , testCase "Creating two accounts with the same data should fail" $ - assertException - (WalletException "Account already exists") $ do - _ <- newAccount NewAccount - { newAccountName = "main" - , newAccountType = AccountRegular - , newAccountMaster = Just xPrv1 - , newAccountDeriv = Just 0 - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - newAccount NewAccount - { newAccountName = "main" - , newAccountType = AccountRegular - , newAccountMaster = Nothing - , newAccountDeriv = Just 0 - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - , testCase "Invalid multisig parameters (0 of 1)" $ - assertException (WalletException "Invalid account type") $ - newAccount NewAccount - { newAccountName = "multisig-0-of-1" - , newAccountType = AccountMultisig 0 1 - , newAccountMaster = Just xPrv1 - , 
newAccountDeriv = Nothing - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - - , testCase "Invalid multisig parameters (2 of 1)" $ - assertException (WalletException "Invalid account type") $ - newAccount NewAccount - { newAccountName = "multisig-2-of-1" - , newAccountType = AccountMultisig 2 1 - , newAccountMaster = Just xPrv1 - , newAccountDeriv = Nothing - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - - , testCase "Invalid multisig parameters (15 of 16)" $ - assertException (WalletException "Invalid account type") $ - newAccount NewAccount - { newAccountName = "multisig-15-of-16" - , newAccountType = AccountMultisig 15 16 - , newAccountMaster = Just xPrv1 - , newAccountDeriv = Nothing - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - - , testCase "To many multisig keys (3 keys for 1 of 2)" $ - assertException (WalletException "Invalid account keys") $ - newAccount NewAccount - { newAccountName = "multisig-1-of-2-with-3" - , newAccountType = AccountMultisig 1 2 - , newAccountMaster = Just xPrv1 - , newAccountDeriv = Nothing - , newAccountKeys = - [ xPub1 - , "xpub661MyMwAqRbcFEPH5Aon6F7edspeu1v6a1Nw5qJgk1aX5XYg1ktBL9Azra2CKaAJ2bHXEXkeKHE3eFaCJktFiA5tSMDQDs6bi83maQtdYby" - , "xpub661MyMwAqRbcFtDszBWpawpg4KbNWL9qD4VdRwjd1L5cmcS8nXHWXpg9WL1Xc9Yh7HbQBwWDw37YJfc4AF3YEpvAHEBPBFQPFkUcFHnopw8" - ] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - - , testCase "Calling addAccountKeys with an empty key list should fail" $ - assertException - (WalletException "Invalid account keys") $ do - res <- newAccount NewAccount - { newAccountName = "multisig-1-of-2-plus-empty" - , newAccountType = AccountMultisig 1 2 - , newAccountMaster = Just xPrv1 - , newAccountDeriv = Nothing - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - addAccountKeys (fst res) [] - - , testCase "Calling addAccountKeys on a non-multisig account should fail" $ - assertException - (WalletException "The account is already complete") $ do - res <- newAccount NewAccount - { newAccountName = "regular-plus-more" - , newAccountType = AccountRegular - , newAccountMaster = Just xPrv1 - , newAccountDeriv = Nothing - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - addAccountKeys (fst res) - ["xpub661MyMwAqRbcFEPH5Aon6F7edspeu1v6a1Nw5qJgk1aX5XYg1ktBL9Azra2CKaAJ2bHXEXkeKHE3eFaCJktFiA5tSMDQDs6bi83maQtdYby"] - - , testCase "Adding keys to a complete multisig account should fail" $ - assertException - (WalletException "The account is already complete") $ do - res <- newAccount NewAccount - { newAccountName = "regular-plus-more" - , newAccountType = AccountMultisig 1 2 - , newAccountMaster = Just xPrv1 - , newAccountDeriv = Nothing - , newAccountKeys = - [ xPub1 - , "xpub661MyMwAqRbcFEPH5Aon6F7edspeu1v6a1Nw5qJgk1aX5XYg1ktBL9Azra2CKaAJ2bHXEXkeKHE3eFaCJktFiA5tSMDQDs6bi83maQtdYby" - ] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = 
Nothing - } - addAccountKeys (fst res) - ["xpub661MyMwAqRbcFtDszBWpawpg4KbNWL9qD4VdRwjd1L5cmcS8nXHWXpg9WL1Xc9Yh7HbQBwWDw37YJfc4AF3YEpvAHEBPBFQPFkUcFHnopw8"] - - , testCase "Getting a non-existing account should fail" $ - assertException - (WalletException "Account inexistent does not exist") $ - getAccount "inexistent" - - ] - , testGroup "Address tests" - [ testCase "Decreasing the address gap should fail" $ - assertException (WalletException "The gap of an account can only be increased") $ do - res <- newAccount NewAccount - { newAccountName = "reduce-gap" - , newAccountType = AccountRegular - , newAccountMaster = Just xPrv1 - , newAccountDeriv = Nothing - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - accE' <- setAccountGap (fst res) 15 - setAccountGap accE' 14 - - , testCase "Setting a label on a hidden address key should fail" $ - assertException (WalletException "Invalid address index 10") $ do - res <- newAccount NewAccount - { newAccountName = "label-hidden" - , newAccountType = AccountRegular - , newAccountMaster = Just xPrv1 - , newAccountDeriv = Nothing - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - setAddrLabel (fst res) 10 AddressExternal "Gym membership" - - , testCase "Setting a label on an invalid address key should fail" $ - assertException (WalletException "Invalid address index 20") $ do - res <- newAccount NewAccount - { newAccountName = "label-invalid" - , newAccountType = AccountRegular - , newAccountMaster = Just xPrv1 - , newAccountDeriv = Nothing - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - setAddrLabel (fst res) 20 AddressExternal "Gym membership" - - , testCase "Requesting an address prvkey on a read-only account should fail" $ - assertException - (WalletException "Could not get private key") $ do - res <- newAccount NewAccount - { newAccountName = "label-invalid" - , newAccountType = AccountRegular - , newAccountMaster = Nothing - , newAccountDeriv = Nothing - , newAccountKeys = [xPub1] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - addressPrvKey (fst res) Nothing 2 AddressExternal - , testCase "Invalid entropy (15)" $ assertException - (WalletException "Entropy can only be 16, 20, 24, 28 or 32 bytes") $ - newAccount NewAccount - { newAccountName = "account" - , newAccountType = AccountRegular - , newAccountMaster = Nothing - , newAccountDeriv = Nothing - , newAccountKeys = [] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Just 15 - } - , testCase "Invalid entropy (33)" $ assertException - (WalletException "Entropy can only be 16, 20, 24, 28 or 32 bytes") $ - newAccount NewAccount - { newAccountName = "account" - , newAccountType = AccountRegular - , newAccountMaster = Nothing - , newAccountDeriv = Nothing - , newAccountKeys = [] - , newAccountMnemonic = Nothing - , newAccountPassword = Nothing - , newAccountReadOnly = False - , newAccountEntropy = Just 33 - } - ] - , testGroup "Wallet tests" - [ testCase "Verify address derivations" $ runUnit testDerivations - , testCase "Verify balances" $ runUnit testBalances - , testCase "Verify balances in conflict" $ 
runUnit testConflictBalances - , testCase "Offline transactions" $ runUnit testOffline - , testCase "Kill an offline tx by spending its coins" $ runUnit testKillOffline - , testCase "Importing coinbase txs" $ runUnit testCoinbaseTxs - , testCase "Offline transaction exceptions" testOfflineExceptions - , testCase "Multisig test 1" $ runUnit testImportMultisig - , testCase "Kill Tx" $ runUnit testKillTx - , testCase "Delete Tx" $ runUnit testDeleteTx - , testCase "Delete Unsigned Tx" $ runUnit testDeleteUnsignedTx - , testCase "Notifications" $ runUnit testNotification - , testCase "RootToAccKey" $ testRootToAccKey - ] - , testGroup "Dice conversion tests" - [ testCase "Base 6 to Base 16" testDecodeBase6 - , testCase "diceToEntropy" testDiceToEntropy - , testCase "diceToMnemonic" testDiceToMnemonic - , testCase "Invalid dice rolls" testInvalidDiceToEntropy - , testCase "mixEntropy" testMixEntropy - ] - ] - -assertException :: (Exception e, Eq e) => e -> App a -> Assertion -assertException ex action = - handleJust matchEx (const $ return ()) $ do - runUnit action - assertFailure $ "Expecting exception: " ++ show ex - where - matchEx = guard . (== ex) - -runUnit :: App a -> Assertion -runUnit action = do - _ <- runSqlite ":memory:" $ do - _ <- runMigrationSilent migrateWallet - initWallet 0.0001 - action - return () - -ms :: Mnemonic -ms = "mass coast dance birth online various renew alert crunch middle absurd health" - -tid1 :: TxHash -tid1 = "0000000000000000000000000000000000000000000000000000000000000001" - -z :: Hash256 -z = "0000000000000000000000000000000000000000000000000000000000000000" - -fakeNode :: NodeBlock -- ^ Parent - -> [TxHash] -- ^ Transactions - -> Word32 -- ^ Chain - -> Word32 -- ^ Nonce - -> NodeBlock -fakeNode parent tids chain nonce = - nodeBlock parent chain header - where - header = BlockHeader - (blockVersion $ nHeader parent) - (nodeHash parent) - (if null tids then z else buildMerkleRoot tids) - (nodeTimestamp parent + 600) - (blockBits $ nHeader parent) - nonce - --- -- Creates fake testing blocks --- fakeNode :: Word32 -> BlockHash -> NodeBlock --- fakeNode i h = BlockHeaderNode --- { blockHeaderNodeHash = headerHash header --- , blockHeaderNodeHeader = BlockHeader 1 z1 z2 0 0 0 --- , blockHeaderNodeHeight = i --- , blockHeaderNodeWork = 0 --- , blockHeaderNodeMedianTimes = [] --- , blockHeaderNodeMinWork = 0 --- } --- where --- z1 = "0000000000000000000000000000000000000000000000000000000000000000" --- z2 = "0000000000000000000000000000000000000000000000000000000000000000" - -fakeTx :: [(TxHash, Word32)] -> [(BS.ByteString, Word64)] -> Tx -fakeTx xs ys = - createTx 1 txi txo 0 - where - txi = map (\(h,p) -> TxIn (OutPoint h p) (BS.pack [1]) maxBound) xs - f = addressToScriptBS . fromJust . base58ToAddr - txo = map (\(a,v) -> TxOut v $ f a ) ys - -testDerivations :: App () -testDerivations = do - accE <- fst <$> newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - - unusedAddresses accE AddressExternal (ListRequest 0 3 False) - >>= liftIO . assertEqual "Generated external addresses do not match" - [ "1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in" - , "1Cf66C6MVTYgMSuXrpn5W1x12RRtAa6v2x" - , "1Guyp96E7ph4PQJoPpz1DLsash8pqjEdVN" - ] . map (addrToBase58 . walletAddrAddress) . 
fst - - unusedAddresses accE AddressInternal (ListRequest 0 3 False) - >>= liftIO . assertEqual "Generated internal addresses do not match" - [ "1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1" - , "1JMTxp3kbXtwHwFMNPCUc2QyjQNQ8ZZxc4" - , "19mTjNo7m7eRVULZ47PzYvnbdXGHGnKXKi" - ] . map (addrToBase58 . walletAddrAddress) . fst - -assertBalance :: AccountId -> Word32 -> Word64 -> App () -assertBalance ai conf b = do - bal <- accountBalance ai conf False - liftIO $ assertEqual ("Balance is not " ++ show b) b bal - -assertBalanceOffline :: AccountId -> Word32 -> Word64 -> App () -assertBalanceOffline ai conf b = do - bal <- accountBalance ai conf True - liftIO $ assertEqual ("Balance is not " ++ show b) b bal - -assertAddress :: Entity Account - -> Word32 -- Confirmations - -> Word32 -- Address Index - -> AddressType - -> [(KeyIndex, BalanceInfo)] - -> App () -assertAddress acc conf addr addrtype b = do - b' <- addressBalances acc addr addr addrtype conf False - liftIO $ assertEqual "Address Balance incorrect" b b' - -assertAddressOffline :: Entity Account - -> Word32 -- Confirmations - -> Word32 -- Address Index - -> AddressType - -> [(KeyIndex, BalanceInfo)] - -> App () -assertAddressOffline acc conf addr addrtype b = do - b' <- addressBalances acc addr addr addrtype conf True - liftIO $ assertEqual "Address Balance incorrect" b b' - -assertImportTx :: AccountId -> Int -> TxConfidence -> Tx -> App () -assertImportTx ai as conf tx = do - tx' <- testTx <$> importNetTx tx Nothing - liftIO $ assertEqual "Transaction import failed" ([(ai, conf)], as) tx' - -assertImportTxOffline :: AccountId -> Int -> TxConfidence -> Tx -> App () -assertImportTxOffline ai as conf tx = do - tx' <- testTx <$> importTx tx Nothing ai - liftIO $ assertEqual "Transaction import failed" ([(ai, conf)], as) tx' - -assertTxConfidence :: AccountId -> TxHash -> TxConfidence -> App () -assertTxConfidence ai txh conf = do - txM <- getBy $ UniqueAccTx ai txh - case txM of - Just tx -> do - let conf' = walletTxConfidence $ entityVal tx - liftIO $ assertEqual "Transaction confidence wrong" conf' conf - Nothing -> liftIO $ assertFailure "Transaction not found" - -testBalances :: App () -testBalances = do - accE@(Entity ai _) <- fst <$> newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - let fundingTx = fakeTx - [ (tid1, 0) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 10000000) - , ("1Cf66C6MVTYgMSuXrpn5W1x12RRtAa6v2x", 20000000) - ] - let tx1 = fakeTx - [ (txHash fundingTx, 0) - , (txHash fundingTx, 1) - ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 30000000) ] -- external - tx2 = fakeTx - [ (txHash fundingTx, 0) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 5000000) -- external - , ("1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1", 5000000) -- change - ] - - assertBalance ai 0 0 - - -- Import funding transaction twice. This operation should be idempotent - assertImportTx ai 2 TxPending fundingTx - assertImportTx ai 0 TxPending fundingTx - - spendableCoins ai 0 (const . const []) >>= - liftIO . assertEqual "0-conf spendable coins is not 2" 2 . length - spendableCoins ai 1 (const . const []) >>= - liftIO . assertEqual "1-conf spendable coins is not 0" 0 . 
length - - assertBalance ai 0 30000000 - assertBalance ai 1 0 - - assertAddress accE 0 0 AddressExternal [(0, BalanceInfo 10000000 0 1 0)] - assertAddress accE 0 1 AddressExternal [(1, BalanceInfo 20000000 0 1 0)] - assertAddress accE 1 0 AddressExternal [(0, BalanceInfo 0 0 0 0)] - assertAddress accE 1 1 AddressExternal [(1, BalanceInfo 0 0 0 0)] - - assertImportTx ai 0 TxPending tx1 - - assertBalance ai 0 0 - assertBalance ai 1 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 0 1 AddressExternal - [(1, BalanceInfo 20000000 20000000 1 1)] - assertAddress accE 1 0 AddressExternal - [(0, BalanceInfo 0 0 0 0)] - assertAddress accE 1 1 AddressExternal - [(1, BalanceInfo 0 0 0 0)] - - -- We re-import tx1. This operation has to be idempotent with respect to - -- balances. - assertImportTx ai 0 TxPending tx1 - - assertBalance ai 1 0 - assertBalance ai 1 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 0 1 AddressExternal - [(1, BalanceInfo 20000000 20000000 1 1)] - assertAddress accE 1 0 AddressExternal - [(0, BalanceInfo 0 0 0 0)] - assertAddress accE 1 1 AddressExternal - [(1, BalanceInfo 0 0 0 0)] - - -- Importing tx2 twice. This operation has to be idempotent. - assertImportTx ai 1 TxDead tx2 - assertImportTx ai 0 TxDead tx2 - - assertBalance ai 0 0 - assertBalance ai 1 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 0 1 AddressExternal - [(1, BalanceInfo 20000000 20000000 1 1)] - - -- Confirm the funding transaction at height 1 - let block1 = fakeNode nGenesisBlock [txHash fundingTx] 0 1 - importMerkles (BestChain [block1]) [[txHash fundingTx]] Nothing - - assertBalance ai 0 0 - assertBalance ai 1 0 - assertBalance ai 2 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 0 1 AddressExternal - [(1, BalanceInfo 20000000 20000000 1 1)] - assertAddress accE 1 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 1 1 AddressExternal - [(1, BalanceInfo 20000000 20000000 1 1)] - - -- Confirm tx1 at height 2 - let block2 = fakeNode block1 [txHash tx1] 0 2 - importMerkles (BestChain [block2]) [[txHash tx1]] Nothing - - assertBalance ai 0 0 - assertBalance ai 1 0 - assertBalance ai 2 0 - assertBalance ai 3 0 - - assertAddress accE 2 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 2 1 AddressExternal - [(1, BalanceInfo 20000000 20000000 1 1)] - assertAddress accE 3 0 AddressExternal - [(0, BalanceInfo 0 0 0 0)] - assertAddress accE 3 1 AddressExternal - [(1, BalanceInfo 0 0 0 0)] - - -- Reorg on tx2 - let block2' = fakeNode block1 [] 1 22 - block3' = fakeNode block2 [txHash tx2] 1 33 - importMerkles (ChainReorg block1 [block2] [block2', block3']) [[], [txHash tx2]] Nothing - - getBy (UniqueAccTx ai (txHash tx1)) - >>= liftIO - . assertEqual "Confidence is not dead" TxDead - . walletTxConfidence . entityVal . fromJust - - getBy (UniqueAccTx ai (txHash tx2)) - >>= liftIO - . assertEqual "Confidence is not building" TxBuilding - . walletTxConfidence . entityVal . 
fromJust - - assertBalance ai 0 25000000 - assertBalance ai 1 25000000 - assertBalance ai 2 20000000 - assertBalance ai 3 20000000 - assertBalance ai 4 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 0 1 AddressExternal - [(1, BalanceInfo 20000000 0 1 0)] - - assertAddress accE 3 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 3 1 AddressExternal - [(1, BalanceInfo 20000000 0 1 0)] - assertAddress accE 0 0 AddressInternal - [(0, BalanceInfo 5000000 0 1 0)] - assertAddress accE 1 0 AddressInternal - [(0, BalanceInfo 5000000 0 1 0)] - assertAddress accE 2 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - - -- Reimporting tx2 should be idempotent and return TxBuilding - assertImportTx ai 0 TxBuilding tx2 - - accountBalance ai 0 False >>= - liftIO . assertEqual "Balance is not 25000000" 25000000 - accountBalance ai 1 False >>= - liftIO . assertEqual "Balance is not 25000000" 25000000 - accountBalance ai 2 False >>= - liftIO . assertEqual "Balance is not 20000000" 20000000 - accountBalance ai 3 False >>= - liftIO . assertEqual "Balance is not 20000000" 20000000 - accountBalance ai 4 False >>= - liftIO . assertEqual "Balance is not 0" 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 0 1 AddressExternal - [(1, BalanceInfo 20000000 0 1 0)] - assertAddress accE 3 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 3 1 AddressExternal - [(1, BalanceInfo 20000000 0 1 0)] - assertAddress accE 0 0 AddressInternal - [(0, BalanceInfo 5000000 0 1 0)] - assertAddress accE 1 0 AddressInternal - [(0, BalanceInfo 5000000 0 1 0)] - assertAddress accE 2 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - - -- Reorg back onto tx1 - let block3 = fakeNode block2 [] 0 3 - block4 = fakeNode block3 [] 0 4 - importMerkles (ChainReorg block1 [block2', block3'] [block2, block3, block4]) - [[txHash tx1], [], []] Nothing - - assertBalance ai 0 0 - assertBalance ai 1 0 - assertBalance ai 2 0 - assertBalance ai 3 0 - assertBalance ai 4 0 - assertBalance ai 5 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 0 1 AddressExternal - [(1, BalanceInfo 20000000 20000000 1 1)] - assertAddress accE 4 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 4 1 AddressExternal - [(1, BalanceInfo 20000000 20000000 1 1)] - assertAddress accE 5 0 AddressExternal - [(0, BalanceInfo 0 0 0 0)] - assertAddress accE 5 1 AddressExternal - [(1, BalanceInfo 0 0 0 0)] - assertAddress accE 0 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - --- tx1, tx2 and tx3 form a chain, and tx4 is in conflict with tx1 -testConflictBalances :: App () -testConflictBalances = do - accE@(Entity ai _) <- fst <$> newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - let tx1 = fakeTx - [ (tid1, 4) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 10000000) ] - tx2 = fakeTx - [ (txHash tx1, 0) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 6000000) -- external - , ("1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1", 4000000) -- change - ] - tx3 = fakeTx - [ (txHash tx2, 1) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 4000000) ] -- external - tx4 = fakeTx - [ (tid1, 4) ] - [ 
("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 20000000) ] - - -- Import first transaction - assertImportTx ai 1 TxPending tx1 - - assertBalance ai 0 10000000 - assertBalance ai 0 10000000 - assertBalance ai 1 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 0 1 0)] - - -- Import second transaction - assertImportTx ai 1 TxPending tx2 - - assertBalance ai 0 4000000 - assertBalance ai 1 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 0 1 0)] - - -- Let's confirm these two transactions - let block1 = fakeNode nGenesisBlock [txHash tx1] 0 1 - block2 = fakeNode block1 [txHash tx2] 0 2 - importMerkles (BestChain [block1, block2]) - [[txHash tx1], [txHash tx2]] Nothing - - assertBalance ai 0 4000000 - assertBalance ai 1 4000000 - assertBalance ai 2 0 - assertBalance ai 3 0 - - assertAddress accE 1 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 1 0 AddressInternal - [(0, BalanceInfo 4000000 0 1 0)] - assertAddress accE 2 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 2 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - - -- Import third transaction - assertImportTx ai 0 TxPending tx3 - - assertBalance ai 0 0 - assertBalance ai 1 0 - assertBalance ai 2 0 - assertBalance ai 3 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 4000000 1 1)] - assertAddress accE 1 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 1 0 AddressInternal - [(0, BalanceInfo 4000000 4000000 1 1)] - assertAddress accE 2 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 2 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - - -- Now let's add tx4 which is in conflict with tx1 - assertImportTx ai 0 TxDead tx4 - - assertBalance ai 0 0 - assertBalance ai 1 0 - assertBalance ai 2 0 - assertBalance ai 3 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 4000000 1 1)] - assertAddress accE 1 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 1 0 AddressInternal - [(0, BalanceInfo 4000000 4000000 1 1)] - assertAddress accE 2 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 2 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - - -- Now we trigger a reorg that validates tx4. 
tx1, tx2 and tx3 should be dead - let block1' = fakeNode nGenesisBlock [] 1 11 - block2' = fakeNode block1' [txHash tx4] 1 22 - block3' = fakeNode block2' [] 1 33 - importMerkles - (ChainReorg nGenesisBlock [block1, block2] [block1', block2', block3']) - [[], [txHash tx4], []] Nothing - - assertTxConfidence ai (txHash tx1) TxDead - assertTxConfidence ai (txHash tx2) TxDead - assertTxConfidence ai (txHash tx3) TxDead - - assertBalance ai 0 20000000 - assertBalance ai 1 20000000 - assertBalance ai 2 20000000 - assertBalance ai 3 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 20000000 0 1 0)] - assertAddress accE 0 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - assertAddress accE 2 0 AddressExternal - [(0, BalanceInfo 20000000 0 1 0)] - assertAddress accE 3 0 AddressExternal - [(0, BalanceInfo 0 0 0 0)] - - -- Reorg back to tx1, tx2 and tx3 - let block3 = fakeNode block2 [] 0 3 - block4 = fakeNode block3 [] 0 4 - importMerkles - (ChainReorg nGenesisBlock [block1', block2', block3'] - [block1, block2, block3, block4]) - [[txHash tx1], [txHash tx2], [], []] Nothing - - assertTxConfidence ai (txHash tx1) TxBuilding - assertTxConfidence ai (txHash tx2) TxBuilding - - -- Tx3 remains dead until it is included into a block. Dead transactions are - -- only revived upon confirmations. They are not revived if they are not - -- confirmed even if they have no conflicts anymore. - assertTxConfidence ai (txHash tx3) TxDead - assertTxConfidence ai (txHash tx4) TxDead - - assertBalance ai 0 4000000 - assertBalance ai 1 4000000 - assertBalance ai 2 4000000 - assertBalance ai 3 4000000 - assertBalance ai 4 0 - assertBalance ai 5 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 0 1 0)] - assertAddress accE 3 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 3 0 AddressInternal - [(0, BalanceInfo 4000000 0 1 0)] - assertAddress accE 4 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddress accE 4 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - assertAddress accE 5 0 AddressExternal - [(0, BalanceInfo 0 0 0 0)] - -testOffline :: App () -testOffline = do - accE@(Entity ai _) <- fst <$> newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - let tx1 = fakeTx - [ (tid1, 4) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 10000000) ] - tx2 = fakeTx - [ (txHash tx1, 0) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 6000000) -- external - , ("1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1", 4000000) -- change - ] - tx3 = fakeTx - [ (txHash tx2, 1) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 4000000) ] -- external - tx4 = fakeTx - [ (tid1, 4) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 20000000) ] - - -- Import first transaction - assertImportTxOffline ai 1 TxOffline tx1 - - assertBalance ai 0 0 - assertBalanceOffline ai 0 10000000 - assertBalance ai 1 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 0 0 0 0)] - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 0 1 0)] - - -- Reimporting a transaction should be idempotent - assertImportTxOffline ai 0 TxOffline tx1 - - assertBalance ai 0 0 - assertBalanceOffline ai 0 10000000 - assertBalance ai 1 0 - - assertAddress accE 0
0 AddressExternal - [(0, BalanceInfo 0 0 0 0)] - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 0 1 0)] - - -- Import tx2 - assertImportTxOffline ai 1 TxOffline tx2 - - assertBalanceOffline ai 0 4000000 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 0 1 0)] - - -- Import tx3 - assertImportTxOffline ai 0 TxOffline tx3 - - assertBalanceOffline ai 0 0 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 4000000 1 1)] - - -- Import tx4 - assertImportTxOffline ai 0 TxOffline tx4 - - assertTxConfidence ai (txHash tx1) TxDead - assertTxConfidence ai (txHash tx2) TxDead - assertTxConfidence ai (txHash tx3) TxDead - - assertBalanceOffline ai 0 20000000 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 20000000 0 1 0)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - - -- importTx should be idempotent - assertImportTxOffline ai 0 TxOffline tx4 - - assertBalanceOffline ai 0 20000000 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 20000000 0 1 0)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - - -tid0 :: TxHash -tid0 = "0000000000000000000000000000000000000000000000000000000000000000" - -testCoinbaseTxs :: App () -testCoinbaseTxs = do - (Entity ai _) <- fst <$> newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - let cb1 = fakeTx - [ (tid0, 0) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 10000000) ] - cb2 = fakeTx - [ (tid0, 0) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 6000000) -- external - , ("1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1", 4000000) -- change - ] - tx1 = fakeTx - [ (tid1, 0) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 10000000) ] - tx2 = fakeTx - [ (tid1, 0) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 6000000) -- external - , ("1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1", 4000000) -- change - ] - block1 = fakeNode nGenesisBlock [txHash cb1, txHash tx1] 0 1 - block2 = fakeNode block1 [txHash cb2, txHash tx2] 0 1 - - liftIO $ assertBool "Tx1 is not a coinbase tx" $ isCoinbaseTx cb1 - liftIO $ assertBool "Tx2 is not a coinbase tx" $ isCoinbaseTx cb2 - - -- Here we are testing that coinbase transactions can be imported - -- without double spending each other (they all spend the same coin 0x000...) 
- - assertImportTx ai 1 TxPending cb1 - assertImportTx ai 0 TxPending tx1 - importMerkles (BestChain [block1]) [[txHash cb1, txHash tx1]] Nothing - assertTxConfidence ai (txHash cb1) TxBuilding - assertTxConfidence ai (txHash tx1) TxBuilding - - assertImportTx ai 1 TxPending cb2 -- This is pending and not dead - assertImportTx ai 0 TxDead tx2 - importMerkles (BestChain [block1, block2]) [[txHash cb2, txHash tx2]] Nothing - assertTxConfidence ai (txHash cb1) TxBuilding -- This is pending and not dead - assertTxConfidence ai (txHash tx1) TxDead - assertTxConfidence ai (txHash cb2) TxBuilding - assertTxConfidence ai (txHash tx2) TxBuilding - -testKillOffline :: App () -testKillOffline = do - accE@(Entity ai _) <- fst <$> newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - let tx1 = fakeTx - [ (tid1, 4) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 10000000) ] - tx2 = fakeTx - [ (txHash tx1, 0) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 6000000) -- external - , ("1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1", 4000000) -- change - ] - tx3 = fakeTx - [ (txHash tx2, 1) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 4000000) ] -- external - tx4 = fakeTx - [ (txHash tx1, 0) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 2000000) -- external - , ("1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1", 3000000) -- change - , ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 5000000) -- more change - ] - - -- Import tx1 as a network transaction - assertImportTx ai 1 TxPending tx1 - - assertBalance ai 0 10000000 - assertBalanceOffline ai 0 10000000 - assertBalance ai 1 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 0 1 0)] - - -- Import tx2 as offline - assertImportTxOffline ai 1 TxOffline tx2 - - assertBalance ai 0 10000000 - assertBalanceOffline ai 0 4000000 - assertBalance ai 1 0 - assertBalanceOffline ai 1 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 0 1 0)] - assertAddress accE 0 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 0 1 0)] - - -- Import tx3 as offline - assertImportTxOffline ai 0 TxOffline tx3 - - assertBalance ai 0 10000000 - assertBalanceOffline ai 0 0 - assertBalance ai 1 0 - assertBalanceOffline ai 1 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 0 1 0)] - assertAddress accE 0 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 4000000 1 1)] - - -- Import tx4 as a network transaction. It should override tx2 and tx3. 
- assertImportTx ai 0 TxPending tx4 - - assertTxConfidence ai (txHash tx2) TxDead - assertTxConfidence ai (txHash tx3) TxDead - - assertBalance ai 0 8000000 - assertBalanceOffline ai 0 8000000 - assertBalance ai 1 0 - assertBalanceOffline ai 1 0 - - assertAddress accE 0 0 AddressExternal - [(0, BalanceInfo 15000000 10000000 2 1)] - assertAddress accE 0 0 AddressInternal - [(0, BalanceInfo 3000000 0 1 0)] - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 15000000 10000000 2 1)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 3000000 0 1 0)] - -testOfflineExceptions :: Assertion -testOfflineExceptions = do - let tx1 = fakeTx - [ (tid1, 4) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 10000000) ] - tx2 = fakeTx - [ (txHash tx1, 0) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 6000000) -- external - , ("1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1", 4000000) -- change - ] - tx3 = fakeTx - [ (txHash tx2, 1) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 4000000) ] -- external - tx4 = fakeTx - [ (tid1, 4) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 20000000) ] - - assertException (WalletException "Could not import offline transaction") $ do - _ <- newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - Entity ai _ <- getAccount "acc1" - assertImportTx ai 1 TxPending tx1 - importTx tx4 Nothing ai - - assertException (WalletException "Could not import offline transaction") $ do - _ <- newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - Entity ai _ <- getAccount "acc1" - assertImportTx ai 1 TxPending tx4 - assertImportTx ai 0 TxDead tx1 - assertImportTx ai 1 TxDead tx2 - importTx tx3 Nothing ai - - assertException (WalletException "Could not import offline transaction") $ do - _ <- newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - Entity ai _ <- getAccount "acc1" - assertImportTx ai 1 TxPending tx1 - importTx tx1 Nothing ai - --- This test creates a multisig account with the key of testImportMultisig2 -testImportMultisig :: App () -testImportMultisig = do - accE1@(Entity ai1 _) <- fst <$> newAccount NewAccount - { newAccountName = "ms1" - , newAccountType = AccountMultisig 2 2 - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = ["xpub6C5bmQQw4h4DVMVydW4bhtuz4jZpUpsrvfMYdZXVVuXyePRcDhBzXufZ5sfSZqtcnXPtDCYyAAPPkuAKEtasfRo9RatgFNP4X58zM1QjjYK"] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - accE2@(Entity ai2 _) <- fst <$> newAccount NewAccount - { newAccountName = "ms2" - , newAccountType = AccountMultisig 2 2 - , newAccountDeriv = Just 1 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys =
["xpub6C5bmQQw4h4DSHbWsT7GDaU1CxcamCwTGRo81T2g9VewnEyb16eHwzojDPsZguGizLD3ttFynKPby7ABY4MQ3xAf5DNafj32uf84Gw48Phb"] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - let fundingTx = createTx - 1 - [ TxIn (OutPoint tid1 0) (BS.pack [1]) maxBound ] -- dummy input - [ TxOut 10000000 $ - addressToScriptBS $ fromJust $ - base58ToAddr "32SupmLrYyZMSPfL4tgkuUCyvbzEyBfqxd" - ] - 0 - - importNetTx fundingTx Nothing - >>= liftIO - . assertEqual "Transaction import failed" - ([(ai1, TxPending), (ai2, TxPending)], 2) - . testTx - - -- Create a transaction which has 0 signatures in ms1 - tx1 <- fst <$> createWalletTx accE1 Nothing Nothing - [ ( fromJust $ base58ToAddr "3AV9s2W9atAaChWdwTpRv8qvTHcV7L1zyj" - , 5000000 - ) - ] 100 0 False True - - liftIO $ assertEqual "Confidence is not offline" TxOffline $ - walletTxConfidence tx1 - - spendableCoins ai1 0 (const . const []) - >>= liftIO - . assertEqual "Wrong txhash in coins" [] - . map (walletCoinHash . entityVal . inCoinDataCoin) - - txs Nothing ai1 (ListRequest 0 10 False) - >>= liftIO - . assertEqual "Wrong txhash in tx list" - (sort [txHash fundingTx, walletTxHash tx1]) - . sort . map walletTxHash . fst - - assertBalance ai1 0 10000000 - assertBalance ai1 1 0 - assertBalanceOffline ai1 0 9966200 - - -- Import the empty transaction in ms2 - tx2 <- head . fst <$> importTx (walletTxTx tx1) Nothing ai2 - - -- This second import should be idempotent - _ <- importTx (walletTxTx tx1) Nothing ai2 - - liftIO $ assertEqual "Txid do not match" - (walletTxHash tx1) (walletTxHash tx2) - - liftIO $ assertEqual "Confidence is not offline" TxOffline $ - walletTxConfidence tx2 - - spendableCoins ai2 0 (const . const []) - >>= liftIO - . assertEqual "Wrong txhash in coins" [] - . map (walletCoinHash . entityVal . inCoinDataCoin) - - txs Nothing ai2 (ListRequest 0 10 False) - >>= liftIO - . assertEqual "Wrong txhash in tx list" - (sort [txHash fundingTx, walletTxHash tx2]) - . sort . map walletTxHash . fst - - assertBalance ai2 0 10000000 - assertBalance ai2 1 0 - assertBalanceOffline ai2 0 9966200 - - -- Sign the transaction in ms2 - tx3:_ <- fst <$> signAccountTx accE2 Nothing Nothing (walletTxHash tx2) - - liftIO $ assertEqual "Confidence is not pending" TxPending $ - walletTxConfidence tx3 - - spendableCoins ai2 0 (const . const []) - >>= liftIO - . assertEqual "Wrong txhash in coins" - [walletTxHash tx3, walletTxHash tx3] - . map (walletCoinHash . entityVal . inCoinDataCoin) - - txs Nothing ai2 (ListRequest 0 10 False) - >>= liftIO - . assertEqual "Wrong txhash in tx list" - (sort [txHash fundingTx, walletTxHash tx3]) - . sort . map walletTxHash . fst - - assertBalance ai2 0 9966200 - assertBalance ai2 1 0 - assertBalanceOffline ai2 0 9966200 - - tx4 <- fmap (entityVal . fromJust) $ - getBy $ UniqueAccTx ai1 $ walletTxHash tx3 - - liftIO $ assertEqual "Confidence is not pending" TxPending $ - walletTxConfidence tx4 - - spendableCoins ai1 0 (const . const []) - >>= liftIO - . assertEqual "Wrong txhash in coins" - [walletTxHash tx3, walletTxHash tx3] - . map (walletCoinHash . entityVal . inCoinDataCoin) - - txs Nothing ai1 (ListRequest 0 10 False) - >>= liftIO - . assertEqual "Wrong txhash in tx list" - (sort [txHash fundingTx, walletTxHash tx3]) - . sort . map walletTxHash . fst - - assertBalance ai1 0 9966200 - assertBalance ai1 1 0 - assertBalanceOffline ai1 0 9966200 - - -- Importing the transaction should have no effect as it was globally - -- imported already in the previous step. - tx5 <- head . 
fst <$> importTx (walletTxTx tx3) Nothing ai1 - - liftIO $ assertEqual "Confidence is not pending" TxPending $ - walletTxConfidence tx5 - - spendableCoins ai1 0 (const . const []) - >>= liftIO - . assertEqual "Wrong txhash in coins" - [walletTxHash tx5, walletTxHash tx5] - . map (walletCoinHash . entityVal . inCoinDataCoin) - - txs Nothing ai1 (ListRequest 0 10 False) - >>= liftIO - . assertEqual "Wrong txhash in tx list" - (sort [txHash fundingTx, walletTxHash tx5]) - . sort . map walletTxHash . fst - - assertBalance ai1 0 9966200 - assertBalance ai1 1 0 - assertBalanceOffline ai1 0 9966200 - -testDeleteTx :: App () -testDeleteTx = do - accE@(Entity ai _) <- fst <$> newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - let tx1 = fakeTx - [ (tid1, 4) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 10000000) ] - tx2 = fakeTx - [ (txHash tx1, 0) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 6000000) -- external - , ("1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1", 4000000) -- change - ] - tx3 = fakeTx - [ (txHash tx2, 1) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 4000000) ] -- external - - assertImportTx ai 1 TxPending tx1 - assertImportTx ai 1 TxPending tx2 - assertImportTx ai 0 TxPending tx3 - - assertBalance ai 0 0 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 4000000 1 1)] - - tx2M' <- getTx $ txHash tx2 - liftIO $ assertBool "Transaction 2 not found" $ isJust tx2M' - deleteTx $ txHash tx2 - - tx1M <- getTx $ txHash tx1 - tx2M'' <- getTx $ txHash tx2 - tx3M <- getTx $ txHash tx3 - liftIO $ assertEqual "Transaction 1 removed" (Just tx1) tx1M - liftIO $ assertEqual "Transaction 2 not removed" Nothing tx2M'' - liftIO $ assertEqual "Transaction 3 not removed" Nothing tx3M - - assertBalance ai 0 10000000 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 0 1 0)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - -testDeleteUnsignedTx :: App () -testDeleteUnsignedTx = do - accE1@(Entity ai1 _) <- fst <$> newAccount NewAccount - { newAccountName = "ms1" - , newAccountType = AccountMultisig 2 2 - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = ["xpub6C5bmQQw4h4DVMVydW4bhtuz4jZpUpsrvfMYdZXVVuXyePRcDhBzXufZ5sfSZqtcnXPtDCYyAAPPkuAKEtasfRo9RatgFNP4X58zM1QjjYK"] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - Entity ai2 _ <- fst <$> newAccount NewAccount - { newAccountName = "ms2" - , newAccountType = AccountMultisig 2 2 - , newAccountDeriv = Just 1 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = ["xpub6C5bmQQw4h4DSHbWsT7GDaU1CxcamCwTGRo81T2g9VewnEyb16eHwzojDPsZguGizLD3ttFynKPby7ABY4MQ3xAf5DNafj32uf84Gw48Phb"] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - let fundingTx = createTx - 1 - [ TxIn (OutPoint tid1 0) (BS.pack [1]) maxBound ] -- dummy input - [ TxOut 10000000 $ - addressToScriptBS $ fromJust $ - base58ToAddr "32SupmLrYyZMSPfL4tgkuUCyvbzEyBfqxd" - ] - 0 - - importNetTx fundingTx Nothing - >>= liftIO - . 
assertEqual "Confidence is not pending" - ([(ai1, TxPending), (ai2, TxPending)], 2) - . testTx - - -- Create a transaction which has 0 signatures in ms1 - tx1 <- fst <$> createWalletTx accE1 Nothing Nothing - [ ( fromJust $ base58ToAddr "3AV9s2W9atAaChWdwTpRv8qvTHcV7L1zyj" - , 5000000 - ) - ] 100 0 False True - - liftIO $ assertEqual "Confidence is not offline" TxOffline $ - walletTxConfidence tx1 - - spendableCoins ai1 0 (const . const []) - >>= liftIO - . assertEqual "Wrong txhash in coins" [] - . map (walletCoinHash . entityVal . inCoinDataCoin) - - txs Nothing ai1 (ListRequest 0 10 False) - >>= liftIO - . assertEqual "Wrong txhash in tx list" - (sort [txHash fundingTx, walletTxHash tx1]) - . sort . map walletTxHash . fst - - assertBalance ai1 0 10000000 - assertBalance ai1 1 0 - assertBalanceOffline ai1 0 9966200 - - tx1EM <- getTx $ walletTxHash tx1 - liftIO $ assertBool "Transaction 1 not found" $ isJust tx1EM - deleteTx $ walletTxHash tx1 - - tx1M <- getTx $ walletTxHash tx1 - liftIO $ assertEqual "Transaction not removed" Nothing tx1M - -testNotification :: App () -testNotification = do - _ <- newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - let tx1 = fakeTx - [ (tid1, 4) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 10000000) ] - tx2 = fakeTx - [ (txHash tx1, 0) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 6000000) -- external - , ("1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1", 4000000) -- change - ] - tx3 = fakeTx - [ (txHash tx2, 1) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 4000000) ] -- external - - notifChan <- liftIO $ atomically $ newTBMChan 1000 - - _ <- importNetTx tx1 (Just notifChan) - tx1NM <- liftIO $ atomically $ readTBMChan notifChan - liftIO $ case tx1NM of - Just (NotifTx JsonTx{..}) -> - assertEqual "Notif hash does not match" (txHash tx1) jsonTxHash - _ -> assertFailure "Transaction notification is not the right type" - - _ <- importNetTx tx2 (Just notifChan) - tx2NM <- liftIO $ atomically $ readTBMChan notifChan - liftIO $ case tx2NM of - Just (NotifTx JsonTx{..}) -> - assertEqual "Notif hash does not match" (txHash tx2) jsonTxHash - _ -> assertFailure "Transaction notification is not the right type" - - _ <- importNetTx tx3 Nothing - - let block1 = fakeNode nGenesisBlock [txHash tx1] 0 1 - block2 = fakeNode block1 [txHash tx2] 0 2 - best = BestChain [block1, block2] - txs1 = [[txHash tx1], [txHash tx2]] - importMerkles best txs1 (Just notifChan) - b1NM <- liftIO $ atomically $ readTBMChan notifChan - liftIO $ case b1NM of - Just (NotifBlock JsonBlock{..}) -> - assertEqual "Block hash does not match" - (nodeHash block1) jsonBlockHash - _ -> assertFailure "Block notification not the right type" - tx1NM' <- liftIO $ atomically $ readTBMChan notifChan - liftIO $ case tx1NM' of - Just (NotifTx JsonTx{..}) -> - assertEqual "Transaction list does not match" (txHash tx1) - jsonTxHash - _ -> assertFailure "Transaction notification not the right type" - b2NM <- liftIO $ atomically $ readTBMChan notifChan - liftIO $ case b2NM of - Just (NotifBlock JsonBlock{..}) -> - assertEqual "Block hash does not match" - (nodeHash block2) jsonBlockHash - _ -> assertFailure "Block notification not the right type" - tx2NM' <- liftIO $ atomically $ readTBMChan notifChan - liftIO $ case tx2NM' of - Just (NotifTx JsonTx{..}) -> - 
assertEqual "Transaction list does not match" (txHash tx2) - jsonTxHash - _ -> assertFailure "Transaction notification not the right type" - - - let block2' = fakeNode block1 [txHash tx2, txHash tx3] 1 22 - txs2 = [[txHash tx2, txHash tx3]] - reorg = ChainReorg block1 [block2] [block2'] - importMerkles reorg txs2 (Just notifChan) - b3NM <- liftIO $ atomically $ readTBMChan notifChan - liftIO $ case b3NM of - Just (NotifBlock JsonBlock{..}) -> - assertEqual "Block hash does not match" - (nodeHash block2') jsonBlockHash - _ -> assertFailure "Block notification not the right type" - tx3NM2 <- liftIO $ atomically $ readTBMChan notifChan - liftIO $ case tx3NM2 of - Just (NotifTx JsonTx{..}) -> - assertEqual "Transaction does not match" - (txHash tx2) - jsonTxHash - _ -> assertFailure "Transaction notification not the right type" - tx3NM3 <- liftIO $ atomically $ readTBMChan notifChan - liftIO $ case tx3NM3 of - Just (NotifTx JsonTx{..}) -> - assertEqual "Transaction does not match" - (txHash tx3) - jsonTxHash - _ -> assertFailure "Transaction notification not the right type" - - -testKillTx :: App () -testKillTx = do - accE@(Entity ai _) <- fst <$> newAccount NewAccount - { newAccountName = "acc1" - , newAccountType = AccountRegular - , newAccountDeriv = Just 0 - , newAccountMaster = Nothing - , newAccountMnemonic = Just (cs ms) - , newAccountPassword = Nothing - , newAccountKeys = [] - , newAccountReadOnly = False - , newAccountEntropy = Nothing - } - let tx1 = fakeTx - [ (tid1, 4) ] - [ ("1BThGRupK6Ah44sfCtsg2QkoEDJA58d8in", 10000000) ] - tx2 = fakeTx - [ (txHash tx1, 0) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 6000000) -- external - , ("1JGvK2MYQ3wwxMdYeyf7Eg1HeVJuEq3AT1", 4000000) -- change - ] - tx3 = fakeTx - [ (txHash tx2, 1) ] - [ ("1MchgrtQEUgV1f7Nqe1vEzvdmBzJHz8zrY", 4000000) ] -- external - - assertImportTx ai 1 TxPending tx1 - assertImportTx ai 1 TxPending tx2 - assertImportTx ai 0 TxPending tx3 - - assertBalanceOffline ai 0 0 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 4000000 1 1)] - - killTxs Nothing [txHash tx2] - - assertBalanceOffline ai 0 10000000 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 0 1 0)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - - -- Killing a transaction should be idempotent - killTxs Nothing [txHash tx2] - - assertBalanceOffline ai 0 10000000 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 0 1 0)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - - killTxs Nothing [txHash tx3] - - assertBalanceOffline ai 0 10000000 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 0 1 0)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 0 0 0 0)] - - reviveTx Nothing tx2 - - assertBalanceOffline ai 0 4000000 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 0 1 0)] - - -- Reviving a transaction should be idempotent - reviveTx Nothing tx2 - - assertBalanceOffline ai 0 4000000 - - assertAddressOffline accE 0 0 AddressExternal - [(0, BalanceInfo 10000000 10000000 1 1)] - assertAddressOffline accE 0 0 AddressInternal - [(0, BalanceInfo 4000000 0 1 0)] - -testTx :: ([WalletTx], [WalletAddr]) - -> ([(AccountId, TxConfidence)], Int) -testTx (txls, addrs) = (map f txls, 
length addrs) - where - f tx = (walletTxAccount tx, walletTxConfidence tx) - -testDecodeBase6 :: Assertion -testDecodeBase6 = do - assertEqual "Unit 1" (decodeBase6 BS.empty) $ Just BS.empty - assertEqual "Unit 2" (decodeBase6 "6") $ decodeHex "00" - assertEqual "Unit 3" (decodeBase6 "666") $ decodeHex "00" - assertEqual "Unit 4" (decodeBase6 "661") $ decodeHex "01" - assertEqual "Unit 5" (decodeBase6 "6615") $ decodeHex "0B" - assertEqual "Unit 6" (decodeBase6 "6645") $ decodeHex "1D" - assertEqual "Unit 7" (decodeBase6 "66456666") $ decodeHex "92D0" - assertEqual "Unit 8" (decodeBase6 "111111111111111111111111111111111") $ decodeHex "07E65FDC244B0133333333" - assertEqual "Unit 9" (decodeBase6 "55555555555555555555555555555555") $ decodeHex "06954FE21E3E80FFFFFFFF" - assertEqual "Unit 10" (decodeBase6 "161254362643213454433626115643626632163246612666332415423213664") $ decodeHex "0140F8D002341BDF377F1723C9EB6C7ACFF134581C" - -testDiceToEntropy :: Assertion -testDiceToEntropy = do - assertEqual "Unit 1" (diceToEntropy "666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666") $ Right $ BS.replicate 32 0x00 - assertEqual "Unit 2" (diceToEntropy "111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") $ Right $ fromJust $ decodeHex "302582058C61D13F1F9AA61CB6B5982DC3D9A42B333333333333333333333333" - assertEqual "Unit 3" (diceToEntropy "666655555555555555555544444444444444444444444333333333333333333322222222222222222111111111111111111") $ Right $ fromJust $ decodeHex "002F8D57547E01B124FE849EE71CB96CA91478A542F7D4AA833EFAF5255F3333" - assertEqual "Unit 4" (diceToEntropy "615243524162543244414631524314243526152432442413461523424314523615243251625434236413615423162365223") $ Right $ fromJust $ decodeHex "0CC66852D7580358E47819E37CDAF115E00364724346D83D49E59F094DB4972F" - --- These test cases have been generated with haskoin and are provided here --- for regression -testDiceToMnemonic :: Assertion -testDiceToMnemonic = do - assertEqual "Unit 1" (diceToMnemonic "666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666") $ Right "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art" - assertEqual "Unit 2" (diceToMnemonic "111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") $ Right "coral clown lift boat brown panel lazy feel bronze remember gravity fortune diesel spirit proud grid creek office smoke grid creek office smoke interest" - -testInvalidDiceToEntropy :: Assertion -testInvalidDiceToEntropy = do - assertEqual "Invalid empty dice roll" - (Left "99 dice rolls are required") - (diceToEntropy "") - assertEqual "Dice roll too short" - (Left "99 dice rolls are required") - (diceToEntropy "666") - assertEqual "Dice roll too short (98 rolls)" - (Left "99 dice rolls are required") - (diceToEntropy "66666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666") - assertEqual "Dice roll too short (100 rolls)" - (Left "99 dice rolls are required") - (diceToEntropy "6666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666") - assertEqual "Invalid dice roll digit 7" - (Left "Could not decode base6") - (diceToEntropy 
"666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666667") - assertEqual "Invalid dice roll digit 0" - (Left "Could not decode base6") - (diceToEntropy "666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666660") - -testMixEntropy :: Assertion -testMixEntropy = do - assertEqual "Unit 1" - (BS.pack [0x00]) - (mixEntropy (BS.pack [0x00]) (BS.pack [0x00])) - assertEqual "Unit 2" - (BS.pack [0xff]) - (mixEntropy (BS.pack [0x00]) (BS.pack [0xff])) - assertEqual "Unit 3" - (BS.pack [0xff]) - (mixEntropy (BS.pack [0xff]) (BS.pack [0x00])) - assertEqual "Unit 4" - (BS.pack [0x00]) - (mixEntropy (BS.pack [0xff]) (BS.pack [0xff])) - assertEqual "Unit 5" - (BS.pack [0xff]) - (mixEntropy (BS.pack [0xaa]) (BS.pack [0x55])) - assertEqual "Unit 6" - (BS.pack [0xff, 0xff]) - (mixEntropy (BS.pack [0x55, 0xaa]) (BS.pack [0xaa, 0x55])) - assertEqual "Unit 7" - (BS.pack [0xa9, 0xda]) - (mixEntropy (BS.pack [0x7a, 0x54]) (BS.pack [0xd3, 0x8e])) - -testRootToAccKey :: Assertion -testRootToAccKey = do - let root = "xprv9s21ZrQH143K3rhWwd1RSvWM64Z2a5ZzT5RHZ7pPC4DikEGW9AWTLzuGfX8C117bfhargkgKSm3PTSyM74AnkLfJo8iHb4hoRmJWZ5AH1C6" - prv0 = "xprv9y6FMtt3EKVvDoX3mRaFrSXGevn6MkDbuCsXD4d4bA7xuSeSTZL3QCVFN7u1dGf7af1utDqwGjXSKVsPbcy4s5dVRJ4iFpQseVf4ZgHZuMP" - prv1 = "xprv9y6FMtt3EKVvGsRWXUXbLkyFWhjL5NA1ZSRwqB7swZzzmb6Tg9sjz7M5EavfWWBzLxSzA5AHxZswUCwV1KUxhyKefw8UBsWT34HuDisuDg7" - prv2 = "xprv9y6FMtt3EKVvLtvKwuA8H4AFB2Vz4o7T15y4GDJ3FjY2vxafMdDLBuCkG65HDW8nMQs3FVRr3SqMQ1X5LHsdDd6VzxuwLmPcz7SP1TafMyN" - acc0 = "xpub6C5bmQQw4h4DSHbWsT7GDaU1CxcamCwTGRo81T2g9VewnEyb16eHwzojDPsZguGizLD3ttFynKPby7ABY4MQ3xAf5DNafj32uf84Gw48Phb" - acc1 = "xpub6C5bmQQw4h4DVMVydW4bhtuz4jZpUpsrvfMYdZXVVuXyePRcDhBzXufZ5sfSZqtcnXPtDCYyAAPPkuAKEtasfRo9RatgFNP4X58zM1QjjYK" - acc2 = "xpub6C5bmQQw4h4DZNzo3vh8eC6yj4LUUFqJNJtf4bhep551okuouAXajhXE7QZqGNLJigrRWairbTKeKZ5LsLcSNbmzZGarY1JJcjFDi8JxFPC" - custom = "xprvA1kxrV2ViHBViVSwasXMsWutXtNR8N3L6w88KT7Z6FodeCVtg2qsrgnqcvLkMpoNq4tSnEuNwFaKtSTaAH2U15qipAPWpT7g1PG6ecZfED8" - - assertEqual "Unit 1" prv0 $ rootToAccKey root 0 - assertEqual "Unit 2" prv0 $ rootToAccKey prv0 0 - assertEqual "Unit 3" custom $ rootToAccKey custom 0 - assertEqual "Unit 4" [prv0, prv1, prv2] $ - rootToAccKeys root [acc0, acc1, acc2] - assertEqual "Unit 5" [prv0] $ - rootToAccKeys prv0 [acc0, acc1, acc2] - assertEqual "Unit 6" [custom] $ - rootToAccKeys custom [acc0, acc1, acc2] - diff --git a/haskoin-core/src/Network/Haskoin/Block.hs b/src/Network/Haskoin/Block.hs similarity index 100% rename from haskoin-core/src/Network/Haskoin/Block.hs rename to src/Network/Haskoin/Block.hs diff --git a/haskoin-core/src/Network/Haskoin/Block/Genesis.hs b/src/Network/Haskoin/Block/Genesis.hs similarity index 100% rename from haskoin-core/src/Network/Haskoin/Block/Genesis.hs rename to src/Network/Haskoin/Block/Genesis.hs diff --git a/haskoin-core/src/Network/Haskoin/Block/Headers.hs b/src/Network/Haskoin/Block/Headers.hs similarity index 100% rename from haskoin-core/src/Network/Haskoin/Block/Headers.hs rename to src/Network/Haskoin/Block/Headers.hs diff --git a/haskoin-core/src/Network/Haskoin/Block/Merkle.hs b/src/Network/Haskoin/Block/Merkle.hs similarity index 100% rename from haskoin-core/src/Network/Haskoin/Block/Merkle.hs rename to src/Network/Haskoin/Block/Merkle.hs diff --git a/haskoin-core/src/Network/Haskoin/Block/Types.hs b/src/Network/Haskoin/Block/Types.hs similarity index 100% rename from haskoin-core/src/Network/Haskoin/Block/Types.hs rename to 
src/Network/Haskoin/Block/Types.hs
diff --git a/haskoin-core/src/Network/Haskoin/Constants.hs b/src/Network/Haskoin/Constants.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Constants.hs
rename to src/Network/Haskoin/Constants.hs
diff --git a/haskoin-core/src/Network/Haskoin/Crypto.hs b/src/Network/Haskoin/Crypto.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Crypto.hs
rename to src/Network/Haskoin/Crypto.hs
diff --git a/haskoin-core/src/Network/Haskoin/Crypto/Base58.hs b/src/Network/Haskoin/Crypto/Base58.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Crypto/Base58.hs
rename to src/Network/Haskoin/Crypto/Base58.hs
diff --git a/haskoin-core/src/Network/Haskoin/Crypto/ECDSA.hs b/src/Network/Haskoin/Crypto/ECDSA.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Crypto/ECDSA.hs
rename to src/Network/Haskoin/Crypto/ECDSA.hs
diff --git a/haskoin-core/src/Network/Haskoin/Crypto/ExtendedKeys.hs b/src/Network/Haskoin/Crypto/ExtendedKeys.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Crypto/ExtendedKeys.hs
rename to src/Network/Haskoin/Crypto/ExtendedKeys.hs
diff --git a/haskoin-core/src/Network/Haskoin/Crypto/Hash.hs b/src/Network/Haskoin/Crypto/Hash.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Crypto/Hash.hs
rename to src/Network/Haskoin/Crypto/Hash.hs
diff --git a/haskoin-core/src/Network/Haskoin/Crypto/Keys.hs b/src/Network/Haskoin/Crypto/Keys.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Crypto/Keys.hs
rename to src/Network/Haskoin/Crypto/Keys.hs
diff --git a/haskoin-core/src/Network/Haskoin/Crypto/Mnemonic.hs b/src/Network/Haskoin/Crypto/Mnemonic.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Crypto/Mnemonic.hs
rename to src/Network/Haskoin/Crypto/Mnemonic.hs
diff --git a/haskoin-core/src/Network/Haskoin/Internals.hs b/src/Network/Haskoin/Internals.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Internals.hs
rename to src/Network/Haskoin/Internals.hs
diff --git a/haskoin-core/src/Network/Haskoin/Network.hs b/src/Network/Haskoin/Network.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Network.hs
rename to src/Network/Haskoin/Network.hs
diff --git a/haskoin-core/src/Network/Haskoin/Network/Bloom.hs b/src/Network/Haskoin/Network/Bloom.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Network/Bloom.hs
rename to src/Network/Haskoin/Network/Bloom.hs
diff --git a/haskoin-core/src/Network/Haskoin/Network/Message.hs b/src/Network/Haskoin/Network/Message.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Network/Message.hs
rename to src/Network/Haskoin/Network/Message.hs
diff --git a/haskoin-core/src/Network/Haskoin/Network/Types.hs b/src/Network/Haskoin/Network/Types.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Network/Types.hs
rename to src/Network/Haskoin/Network/Types.hs
diff --git a/haskoin-core/src/Network/Haskoin/Script.hs b/src/Network/Haskoin/Script.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Script.hs
rename to src/Network/Haskoin/Script.hs
diff --git a/haskoin-core/src/Network/Haskoin/Script/Evaluator.hs b/src/Network/Haskoin/Script/Evaluator.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Script/Evaluator.hs
rename to src/Network/Haskoin/Script/Evaluator.hs
diff --git a/haskoin-core/src/Network/Haskoin/Script/Parser.hs b/src/Network/Haskoin/Script/Parser.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Script/Parser.hs
rename to src/Network/Haskoin/Script/Parser.hs
diff --git a/haskoin-core/src/Network/Haskoin/Script/SigHash.hs b/src/Network/Haskoin/Script/SigHash.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Script/SigHash.hs
rename to src/Network/Haskoin/Script/SigHash.hs
diff --git a/haskoin-core/src/Network/Haskoin/Script/Types.hs b/src/Network/Haskoin/Script/Types.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Script/Types.hs
rename to src/Network/Haskoin/Script/Types.hs
diff --git a/haskoin-core/src/Network/Haskoin/Test.hs b/src/Network/Haskoin/Test.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Test.hs
rename to src/Network/Haskoin/Test.hs
diff --git a/haskoin-core/src/Network/Haskoin/Test/Block.hs b/src/Network/Haskoin/Test/Block.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Test/Block.hs
rename to src/Network/Haskoin/Test/Block.hs
diff --git a/haskoin-core/src/Network/Haskoin/Test/Crypto.hs b/src/Network/Haskoin/Test/Crypto.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Test/Crypto.hs
rename to src/Network/Haskoin/Test/Crypto.hs
diff --git a/haskoin-core/src/Network/Haskoin/Test/Message.hs b/src/Network/Haskoin/Test/Message.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Test/Message.hs
rename to src/Network/Haskoin/Test/Message.hs
diff --git a/haskoin-core/src/Network/Haskoin/Test/Network.hs b/src/Network/Haskoin/Test/Network.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Test/Network.hs
rename to src/Network/Haskoin/Test/Network.hs
diff --git a/haskoin-core/src/Network/Haskoin/Test/Script.hs b/src/Network/Haskoin/Test/Script.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Test/Script.hs
rename to src/Network/Haskoin/Test/Script.hs
diff --git a/haskoin-core/src/Network/Haskoin/Test/Transaction.hs b/src/Network/Haskoin/Test/Transaction.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Test/Transaction.hs
rename to src/Network/Haskoin/Test/Transaction.hs
diff --git a/haskoin-core/src/Network/Haskoin/Test/Util.hs b/src/Network/Haskoin/Test/Util.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Test/Util.hs
rename to src/Network/Haskoin/Test/Util.hs
diff --git a/haskoin-core/src/Network/Haskoin/Transaction.hs b/src/Network/Haskoin/Transaction.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Transaction.hs
rename to src/Network/Haskoin/Transaction.hs
diff --git a/haskoin-core/src/Network/Haskoin/Transaction/Builder.hs b/src/Network/Haskoin/Transaction/Builder.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Transaction/Builder.hs
rename to src/Network/Haskoin/Transaction/Builder.hs
diff --git a/haskoin-core/src/Network/Haskoin/Transaction/Genesis.hs b/src/Network/Haskoin/Transaction/Genesis.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Transaction/Genesis.hs
rename to src/Network/Haskoin/Transaction/Genesis.hs
diff --git a/haskoin-core/src/Network/Haskoin/Transaction/Types.hs b/src/Network/Haskoin/Transaction/Types.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Transaction/Types.hs
rename to src/Network/Haskoin/Transaction/Types.hs
diff --git a/haskoin-core/src/Network/Haskoin/Util.hs b/src/Network/Haskoin/Util.hs
similarity index 100%
rename from haskoin-core/src/Network/Haskoin/Util.hs
rename to src/Network/Haskoin/Util.hs
diff --git a/stack.yaml b/stack.yaml
index bf3fdf9c..a9c75477 100644
--- a/stack.yaml
+++ b/stack.yaml
@@ -1,9 +1,4 @@
 resolver: lts-10.1
-flags: {}
-packages:
-- haskoin-core
-- haskoin-node
-- haskoin-wallet
 extra-deps:
 - daemons-0.2.1
 - pbkdf-1.1.1.1
diff --git a/haskoin-core/test/Main.hs b/test/Main.hs
similarity index 100%
rename from haskoin-core/test/Main.hs
rename to test/Main.hs
diff --git a/haskoin-core/test/Network/Haskoin/Block/Tests.hs b/test/Network/Haskoin/Block/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Block/Tests.hs
rename to test/Network/Haskoin/Block/Tests.hs
diff --git a/haskoin-core/test/Network/Haskoin/Block/Units.hs b/test/Network/Haskoin/Block/Units.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Block/Units.hs
rename to test/Network/Haskoin/Block/Units.hs
diff --git a/haskoin-core/test/Network/Haskoin/Cereal/Tests.hs b/test/Network/Haskoin/Cereal/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Cereal/Tests.hs
rename to test/Network/Haskoin/Cereal/Tests.hs
diff --git a/haskoin-core/test/Network/Haskoin/Crypto/Base58/Tests.hs b/test/Network/Haskoin/Crypto/Base58/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Crypto/Base58/Tests.hs
rename to test/Network/Haskoin/Crypto/Base58/Tests.hs
diff --git a/haskoin-core/test/Network/Haskoin/Crypto/Base58/Units.hs b/test/Network/Haskoin/Crypto/Base58/Units.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Crypto/Base58/Units.hs
rename to test/Network/Haskoin/Crypto/Base58/Units.hs
diff --git a/haskoin-core/test/Network/Haskoin/Crypto/ECDSA/Tests.hs b/test/Network/Haskoin/Crypto/ECDSA/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Crypto/ECDSA/Tests.hs
rename to test/Network/Haskoin/Crypto/ECDSA/Tests.hs
diff --git a/haskoin-core/test/Network/Haskoin/Crypto/ExtendedKeys/Tests.hs b/test/Network/Haskoin/Crypto/ExtendedKeys/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Crypto/ExtendedKeys/Tests.hs
rename to test/Network/Haskoin/Crypto/ExtendedKeys/Tests.hs
diff --git a/haskoin-core/test/Network/Haskoin/Crypto/ExtendedKeys/Units.hs b/test/Network/Haskoin/Crypto/ExtendedKeys/Units.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Crypto/ExtendedKeys/Units.hs
rename to test/Network/Haskoin/Crypto/ExtendedKeys/Units.hs
diff --git a/haskoin-core/test/Network/Haskoin/Crypto/Hash/Tests.hs b/test/Network/Haskoin/Crypto/Hash/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Crypto/Hash/Tests.hs
rename to test/Network/Haskoin/Crypto/Hash/Tests.hs
diff --git a/haskoin-core/test/Network/Haskoin/Crypto/Hash/Units.hs b/test/Network/Haskoin/Crypto/Hash/Units.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Crypto/Hash/Units.hs
rename to test/Network/Haskoin/Crypto/Hash/Units.hs
diff --git a/haskoin-core/test/Network/Haskoin/Crypto/Keys/Tests.hs b/test/Network/Haskoin/Crypto/Keys/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Crypto/Keys/Tests.hs
rename to test/Network/Haskoin/Crypto/Keys/Tests.hs
diff --git a/haskoin-core/test/Network/Haskoin/Crypto/Mnemonic/Tests.hs b/test/Network/Haskoin/Crypto/Mnemonic/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Crypto/Mnemonic/Tests.hs
rename to test/Network/Haskoin/Crypto/Mnemonic/Tests.hs
diff --git a/haskoin-core/test/Network/Haskoin/Crypto/Mnemonic/Units.hs b/test/Network/Haskoin/Crypto/Mnemonic/Units.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Crypto/Mnemonic/Units.hs
rename to test/Network/Haskoin/Crypto/Mnemonic/Units.hs
diff --git a/haskoin-core/test/Network/Haskoin/Crypto/Units.hs b/test/Network/Haskoin/Crypto/Units.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Crypto/Units.hs
rename to test/Network/Haskoin/Crypto/Units.hs
diff --git a/haskoin-core/test/Network/Haskoin/Json/Tests.hs b/test/Network/Haskoin/Json/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Json/Tests.hs
rename to test/Network/Haskoin/Json/Tests.hs
diff --git a/haskoin-core/test/Network/Haskoin/Network/Units.hs b/test/Network/Haskoin/Network/Units.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Network/Units.hs
rename to test/Network/Haskoin/Network/Units.hs
diff --git a/haskoin-core/test/Network/Haskoin/Script/Tests.hs b/test/Network/Haskoin/Script/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Script/Tests.hs
rename to test/Network/Haskoin/Script/Tests.hs
diff --git a/haskoin-core/test/Network/Haskoin/Script/Units.hs b/test/Network/Haskoin/Script/Units.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Script/Units.hs
rename to test/Network/Haskoin/Script/Units.hs
diff --git a/haskoin-core/test/Network/Haskoin/Transaction/Tests.hs b/test/Network/Haskoin/Transaction/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Transaction/Tests.hs
rename to test/Network/Haskoin/Transaction/Tests.hs
diff --git a/haskoin-core/test/Network/Haskoin/Transaction/Units.hs b/test/Network/Haskoin/Transaction/Units.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Transaction/Units.hs
rename to test/Network/Haskoin/Transaction/Units.hs
diff --git a/haskoin-core/test/Network/Haskoin/Util/Tests.hs b/test/Network/Haskoin/Util/Tests.hs
similarity index 100%
rename from haskoin-core/test/Network/Haskoin/Util/Tests.hs
rename to test/Network/Haskoin/Util/Tests.hs
diff --git a/haskoin-core/test/Regtest.hs b/test/Regtest.hs
similarity index 100%
rename from haskoin-core/test/Regtest.hs
rename to test/Regtest.hs
diff --git a/haskoin-core/test/data/script_invalid.json b/test/data/script_invalid.json
similarity index 100%
rename from haskoin-core/test/data/script_invalid.json
rename to test/data/script_invalid.json
diff --git a/haskoin-core/test/data/script_valid.json b/test/data/script_valid.json
similarity index 100%
rename from haskoin-core/test/data/script_valid.json
rename to test/data/script_valid.json
diff --git a/haskoin-core/test/data/tx_valid.json b/test/data/tx_valid.json
similarity index 100%
rename from haskoin-core/test/data/tx_valid.json
rename to test/data/tx_valid.json