hledger/hledger-lib/Hledger/Read/JournalReader.hs

--- * doc
-- Lines beginning "--- *" are collapsible orgstruct nodes. Emacs users,
-- (add-hook 'haskell-mode-hook
-- (lambda () (set-variable 'orgstruct-heading-prefix-regexp "--- " t))
-- 'orgstruct-mode)
-- and press TAB on nodes to expand/collapse.
{-|
A reader for hledger's journal file format
(<http://hledger.org/MANUAL.html#the-journal-file>). hledger's journal
format is a compatible subset of c++ ledger's
(<http://ledger-cli.org/3.0/doc/ledger3.html#Journal-Format>), so this
reader should handle many ledger files as well. Example:
@
2012\/3\/24 gift
expenses:gifts $10
assets:cash
@
Journal format supports the include directive which can read files in
other formats, so the other file format readers need to be importable
here. Some low-level journal syntax parsers which those readers also
use are therefore defined separately in Hledger.Read.Common, avoiding
import cycles.
-}
--- * module
{-# LANGUAGE CPP, RecordWildCards, NamedFieldPuns, NoMonoLocalBinds, ScopedTypeVariables, FlexibleContexts, TupleSections, OverloadedStrings, PackageImports #-}
module Hledger.Read.JournalReader (
--- * exports
-- * Reader
reader,
-- * Parsing utils
genericSourcePos,
parseAndFinaliseJournal,
runJournalParser,
rjp,
-- * Parsers used elsewhere
getParentAccount,
journalp,
directivep,
defaultyeardirectivep,
marketpricedirectivep,
datetimep,
datep,
modifiedaccountnamep,
postingp,
statusp,
emptyorcommentlinep,
followingcommentp
-- * Tests
,tests_JournalReader
)
where
--- * imports
import Prelude ()
import "base-compat-batteries" Prelude.Compat hiding (readFile)
import qualified Control.Exception as C
import Control.Monad
import Control.Monad.Except (ExceptT(..))
import Control.Monad.State.Strict
import Data.Maybe
import qualified Data.Map.Strict as M
import Data.Text (Text)
import Data.String
import Data.List
import qualified Data.Text as T
import Data.Time.Calendar
import Data.Time.LocalTime
import Safe
import Text.Megaparsec hiding (parse)
import Text.Megaparsec.Char
import Text.Megaparsec.Custom
import Text.Printf
import System.FilePath
import "Glob" System.FilePath.Glob hiding (match)
import Hledger.Data
import Hledger.Read.Common
import Hledger.Read.TimeclockReader (timeclockfilep)
import Hledger.Read.TimedotReader (timedotfilep)
import Hledger.Utils
-- $setup
-- >>> :set -XOverloadedStrings
--- * reader
reader :: Reader
reader = Reader
{rFormat = "journal"
,rExtensions = ["journal", "j", "hledger", "ledger"]
,rParser = parse
,rExperimental = False
}
-- | Parse and post-process a "Journal" from hledger's journal file
-- format, or give an error.
parse :: InputOpts -> FilePath -> Text -> ExceptT String IO Journal
parse iopts = parseAndFinaliseJournal journalp' iopts
where
journalp' = do
-- reverse parsed aliases to ensure that they are applied in the order given on the command line
mapM_ addAccountAlias (reverse $ aliasesFromOpts iopts)
journalp
-- | Get the account name aliases from options, if any.
aliasesFromOpts :: InputOpts -> [AccountAlias]
aliasesFromOpts = map (\a -> fromparse $ runParser accountaliasp ("--alias "++quoteIfNeeded a) $ T.pack a)
. aliases_
--- * parsers
--- ** journal
-- | A journal parser. Accumulates and returns a "ParsedJournal",
-- which should be finalised/validated before use.
--
-- >>> rejp (journalp <* eof) "2015/1/1\n a 0\n"
-- Right (Right Journal with 1 transactions, 1 accounts)
--
journalp :: MonadIO m => ErroringJournalParser m ParsedJournal
journalp = do
many addJournalItemP
eof
get
-- | A side-effecting parser; parses any kind of journal item
-- and updates the parse state accordingly.
addJournalItemP :: MonadIO m => ErroringJournalParser m ()
addJournalItemP =
-- All journal line types can be distinguished by their first character,
-- so choice can be used without backtracking.
choice [
directivep
, transactionp >>= modify' . addTransaction
, transactionmodifierp >>= modify' . addTransactionModifier
, periodictransactionp >>= modify' . addPeriodicTransaction
, marketpricedirectivep >>= modify' . addMarketPrice
, void (lift emptyorcommentlinep)
, void (lift multilinecommentp)
] <?> "transaction or directive"
--- ** directives
-- | Parse any journal directive and update the parse state accordingly.
-- Cf http://hledger.org/manual.html#directives,
-- http://ledger-cli.org/3.0/doc/ledger3.html#Command-Directives
directivep :: MonadIO m => ErroringJournalParser m ()
directivep = (do
optional $ char '!'
choice [
includedirectivep
,aliasdirectivep
,endaliasesdirectivep
,accountdirectivep
,applyaccountdirectivep
,commoditydirectivep
,endapplyaccountdirectivep
,tagdirectivep
,endtagdirectivep
,defaultyeardirectivep
,defaultcommoditydirectivep
,commodityconversiondirectivep
,ignoredpricecommoditydirectivep
]
) <?> "directive"
includedirectivep :: MonadIO m => ErroringJournalParser m ()
includedirectivep = do
string "include"
lift (skipSome spacenonewline)
filename <- T.unpack <$> takeWhileP Nothing (/= '\n') -- don't consume newline yet
parentoff <- getOffset
parentpos <- getSourcePos
filepaths <- getFilePaths parentoff parentpos filename
forM_ filepaths $ parseChild parentpos
void newline
where
getFilePaths
:: MonadIO m => Int -> SourcePos -> FilePath -> JournalParser m [FilePath]
getFilePaths parseroff parserpos filename = do
let curdir = takeDirectory (sourceName parserpos)
filename' <- lift $ expandHomePath filename
`orRethrowIOError` (show parserpos ++ " locating " ++ filename)
-- Compiling filename as a glob pattern works even if it is a literal
fileglob <- case tryCompileWith compDefault{errorRecovery=False} filename' of
Right x -> pure x
Left e -> customFailure $
parseErrorAt parseroff $ "Invalid glob pattern: " ++ e
-- Get all matching files in the current working directory, sorting in
-- lexicographic order to simulate the output of 'ls'.
filepaths <- liftIO $ sort <$> globDir1 fileglob curdir
if (not . null) filepaths
then pure filepaths
else customFailure $ parseErrorAt parseroff $
"No existing files match pattern: " ++ filename
parseChild :: MonadIO m => SourcePos -> FilePath -> ErroringJournalParser m ()
parseChild parentpos filepath = do
parentj <- get
let parentfilestack = jincludefilestack parentj
when (filepath `elem` parentfilestack) $
fail ("Cyclic include: " ++ filepath)
childInput <- lift $ readFilePortably filepath
`orRethrowIOError` (show parentpos ++ " reading " ++ filepath)
let initChildj = newJournalWithParseStateFrom filepath parentj
let parser = choiceInState
[ journalp
, timeclockfilep
, timedotfilep
] -- can't include a csv file yet, that reader is special
updatedChildj <- journalAddFile (filepath, childInput) <$>
parseIncludeFile parser initChildj filepath childInput
-- discard child's parse info, combine other fields
put $ updatedChildj <> parentj
newJournalWithParseStateFrom :: FilePath -> Journal -> Journal
newJournalWithParseStateFrom filepath j = mempty{
jparsedefaultyear = jparsedefaultyear j
,jparsedefaultcommodity = jparsedefaultcommodity j
,jparseparentaccounts = jparseparentaccounts j
,jparsealiases = jparsealiases j
,jcommodities = jcommodities j
-- ,jparsetransactioncount = jparsetransactioncount j
,jparsetimeclockentries = jparsetimeclockentries j
,jincludefilestack = filepath : jincludefilestack j
}
-- | Lift an IO action into the exception monad, rethrowing any IO
-- error with the given message prepended.
orRethrowIOError :: MonadIO m => IO a -> String -> TextParser m a
orRethrowIOError io msg = do
eResult <- liftIO $ (Right <$> io) `C.catch` \(e::C.IOException) -> pure $ Left $ printf "%s:\n%s" msg (show e)
case eResult of
Right res -> pure res
Left errMsg -> fail errMsg
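
-- | Parse an account directive: the @account@ keyword, an account name,
-- and optionally (after two or more spaces) a numeric account code or a
-- one-letter account type code (ALERX). A rough usage sketch, mirroring
-- the tests at the end of this module:
--
-- >>> Right _ <- rjp accountdirectivep "account assets:bank:checking\n"
-- >>> Right _ <- rjp accountdirectivep "account assets:bank:checking  A\n"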
accountdirectivep :: JournalParser m ()
accountdirectivep = do
string "account"
lift (skipSome spacenonewline)
-- the account name, possibly modified by preceding alias or apply account directives
acct <- modifiedaccountnamep
-- and maybe something else after two or more spaces ?
matype :: Maybe AccountType <- lift $ fmap (fromMaybe Nothing) $ optional $ try $ do
skipSome spacenonewline -- at least one more space in addition to the one consumed by modifiedaccountnamep
choice [
-- a numeric account code, as supported in 1.9-1.10 ? currently ignored
some digitChar >> return Nothing
-- a letter account type code (ALERX), as added in 1.11 ?
,char 'A' >> return (Just Asset)
,char 'L' >> return (Just Liability)
,char 'E' >> return (Just Equity)
,char 'R' >> return (Just Revenue)
,char 'X' >> return (Just Expense)
]
-- and maybe a comment on this and/or following lines ? (ignore for now)
(_cmt, _tags) <- lift transactioncommentp
-- and maybe Ledger-style subdirectives ? (ignore)
skipMany indentedlinep
-- update the journal
case matype of
Nothing -> return ()
Just atype -> addDeclaredAccountType acct atype
pushDeclaredAccount acct
indentedlinep :: JournalParser m String
indentedlinep = lift (skipSome spacenonewline) >> (rstrip <$> lift restofline)
-- | Parse a one-line or multi-line commodity directive.
--
-- >>> Right _ <- rjp commoditydirectivep "commodity $1.00"
-- >>> Right _ <- rjp commoditydirectivep "commodity $\n format $1.00"
-- >>> Right _ <- rjp commoditydirectivep "commodity $\n\n" -- a commodity with no format
-- >>> Right _ <- rjp commoditydirectivep "commodity $1.00\n format $1.00" -- both, what happens ?
commoditydirectivep :: JournalParser m ()
commoditydirectivep = commoditydirectiveonelinep <|> commoditydirectivemultilinep
-- | Parse a one-line commodity directive.
--
-- >>> Right _ <- rjp commoditydirectiveonelinep "commodity $1.00"
-- >>> Right _ <- rjp commoditydirectiveonelinep "commodity $1.00 ; blah\n"
commoditydirectiveonelinep :: JournalParser m ()
commoditydirectiveonelinep = do
(off, Amount{acommodity,astyle}) <- try $ do
string "commodity"
lift (skipSome spacenonewline)
off <- getOffset
amount <- amountp
pure $ (off, amount)
lift (skipMany spacenonewline)
_ <- lift followingcommentp
let comm = Commodity{csymbol=acommodity, cformat=Just $ dbg2 "style from commodity directive" astyle}
if asdecimalpoint astyle == Nothing
then customFailure $ parseErrorAt off pleaseincludedecimalpoint
else modify' (\j -> j{jcommodities=M.insert acommodity comm $ jcommodities j})
pleaseincludedecimalpoint :: String
pleaseincludedecimalpoint = "to avoid ambiguity, please include a decimal separator in commodity directives"
-- | Parse a multi-line commodity directive, containing 0 or more format subdirectives.
--
-- >>> Right _ <- rjp commoditydirectivemultilinep "commodity $ ; blah \n format $1.00 ; blah"
commoditydirectivemultilinep :: JournalParser m ()
commoditydirectivemultilinep = do
string "commodity"
lift (skipSome spacenonewline)
sym <- lift commoditysymbolp
_ <- lift followingcommentp
mformat <- lastMay <$> many (indented $ formatdirectivep sym)
let comm = Commodity{csymbol=sym, cformat=mformat}
modify' (\j -> j{jcommodities=M.insert sym comm $ jcommodities j})
where
indented = (lift (skipSome spacenonewline) >>)
-- | Parse a format (sub)directive, throwing a parse error if its
-- symbol does not match the one given.
formatdirectivep :: CommoditySymbol -> JournalParser m AmountStyle
formatdirectivep expectedsym = do
string "format"
lift (skipSome spacenonewline)
off <- getOffset
Amount{acommodity,astyle} <- amountp
_ <- lift followingcommentp
if acommodity==expectedsym
then
if asdecimalpoint astyle == Nothing
then customFailure $ parseErrorAt off pleaseincludedecimalpoint
else return $ dbg2 "style from format subdirective" astyle
else customFailure $ parseErrorAt off $
printf "commodity directive symbol \"%s\" and format directive symbol \"%s\" should be the same" expectedsym acommodity
keywordp :: String -> JournalParser m ()
keywordp = (() <$) . string . fromString
spacesp :: JournalParser m ()
spacesp = () <$ lift (skipSome spacenonewline)
-- | Backtracking parser similar to string, but allowing a varying amount of whitespace between words.
keywordsp :: String -> JournalParser m ()
keywordsp = try . sequence_ . intersperse spacesp . map keywordp . words
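
-- | Parse an apply account directive, pushing a parent account prefix
-- onto the parse state for subsequent postings. A rough sketch:
--
-- >>> Right _ <- rjp applyaccountdirectivep "apply account home\n"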
applyaccountdirectivep :: JournalParser m ()
applyaccountdirectivep = do
keywordsp "apply account" <?> "apply account directive"
lift (skipSome spacenonewline)
parent <- lift accountnamep
newline
pushParentAccount parent
endapplyaccountdirectivep :: JournalParser m ()
endapplyaccountdirectivep = do
keywordsp "end apply account" <?> "end apply account directive"
popParentAccount
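
-- | Parse an account alias directive, in basic (OLD = NEW) or regex
-- (/REGEX/ = REPLACEMENT) form, and add it to the parse state.
-- A rough sketch:
--
-- >>> Right _ <- rjp aliasdirectivep "alias checking = assets:bank:checking\n"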
aliasdirectivep :: JournalParser m ()
aliasdirectivep = do
string "alias"
lift (skipSome spacenonewline)
alias <- lift accountaliasp
addAccountAlias alias
accountaliasp :: TextParser m AccountAlias
accountaliasp = regexaliasp <|> basicaliasp
basicaliasp :: TextParser m AccountAlias
basicaliasp = do
-- dbgparse 0 "basicaliasp"
old <- rstrip <$> (some $ noneOf ("=" :: [Char]))
char '='
skipMany spacenonewline
new <- rstrip <$> anySingle `manyTill` eolof -- eol in journal, eof in command lines, normally
return $ BasicAlias (T.pack old) (T.pack new)
regexaliasp :: TextParser m AccountAlias
regexaliasp = do
-- dbgparse 0 "regexaliasp"
char '/'
re <- some $ noneOf ("/\n\r" :: [Char]) -- paranoid: don't try to read past line end
char '/'
skipMany spacenonewline
char '='
skipMany spacenonewline
repl <- anySingle `manyTill` eolof
return $ RegexAlias re repl
endaliasesdirectivep :: JournalParser m ()
endaliasesdirectivep = do
keywordsp "end aliases" <?> "end aliases directive"
clearAccountAliases
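
-- | Parse a tag directive; the tag name is currently parsed and ignored.
-- A sketch, following the test at the end of this module:
--
-- >>> Right _ <- rjp tagdirectivep "tag foo\n"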
tagdirectivep :: JournalParser m ()
tagdirectivep = do
string "tag" <?> "tag directive"
lift (skipSome spacenonewline)
_ <- lift $ some nonspace
lift restofline
return ()
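
-- | Parse an end tag (or pop) directive; currently parsed and ignored.
-- A sketch, following the tests at the end of this module:
--
-- >>> Right _ <- rjp endtagdirectivep "end tag\n"
-- >>> Right _ <- rjp endtagdirectivep "pop\n"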
endtagdirectivep :: JournalParser m ()
endtagdirectivep = do
(keywordsp "end tag" <|> keywordp "pop") <?> "end tag or pop directive"
lift restofline
return ()
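
-- | Parse a default year directive (Y), setting the year used for
-- subsequent partial dates. A sketch, following the tests at the end of
-- this module (a trailing newline is not required):
--
-- >>> Right _ <- rjp defaultyeardirectivep "Y 2018"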
defaultyeardirectivep :: JournalParser m ()
defaultyeardirectivep = do
char 'Y' <?> "default year"
lift (skipMany spacenonewline)
y <- some digitChar
let y' = read y
failIfInvalidYear y
setYear y'
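
-- | Parse a default commodity directive (D), setting the default
-- commodity and its display style; the amount must include a decimal
-- separator. A sketch, following the tests at the end of this module:
--
-- >>> Right _ <- rjp defaultcommoditydirectivep "D $1,000.0\n"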
defaultcommoditydirectivep :: JournalParser m ()
defaultcommoditydirectivep = do
char 'D' <?> "default commodity"
lift (skipSome spacenonewline)
off <- getOffset
Amount{acommodity,astyle} <- amountp
lift restofline
if asdecimalpoint astyle == Nothing
then customFailure $ parseErrorAt off pleaseincludedecimalpoint
else setDefaultCommodityAndStyle (acommodity, astyle)
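
-- | Parse a market price directive (P): a date, a commodity symbol, and
-- its price in another commodity. A sketch, following the test at the
-- end of this module:
--
-- >>> Right _ <- rjp marketpricedirectivep "P 2017/01/30 BTC $922.83\n"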
marketpricedirectivep :: JournalParser m MarketPrice
marketpricedirectivep = do
char 'P' <?> "market price"
lift (skipMany spacenonewline)
date <- try (do {LocalTime d _ <- datetimep; return d}) <|> datep -- a time is ignored
lift (skipSome spacenonewline)
symbol <- lift commoditysymbolp
lift (skipMany spacenonewline)
price <- amountp
lift restofline
return $ MarketPrice date symbol price
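
-- | Parse an ignored-price commodity directive (N); parsed and discarded.
-- A sketch, following the test at the end of this module:
--
-- >>> Right _ <- rjp ignoredpricecommoditydirectivep "N $\n"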
ignoredpricecommoditydirectivep :: JournalParser m ()
ignoredpricecommoditydirectivep = do
char 'N' <?> "ignored-price commodity"
lift (skipSome spacenonewline)
lift commoditysymbolp
lift restofline
return ()
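
-- | Parse a commodity conversion directive (C); parsed and discarded.
-- A sketch, following the test at the end of this module:
--
-- >>> Right _ <- rjp commodityconversiondirectivep "C 1h = $50.00\n"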
commodityconversiondirectivep :: JournalParser m ()
commodityconversiondirectivep = do
char 'C' <?> "commodity conversion"
lift (skipSome spacenonewline)
amountp
lift (skipMany spacenonewline)
char '='
lift (skipMany spacenonewline)
amountp
lift restofline
return ()
--- ** transactions
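
-- | Parse a transaction modifier (auto posting) rule: an equals sign, a
-- query, and zero or more posting lines. A rough sketch, following the
-- test at the end of this module:
--
-- >>> Right _ <- rjp transactionmodifierp "= (some value expr)\n"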
transactionmodifierp :: JournalParser m TransactionModifier
transactionmodifierp = do
char '=' <?> "modifier transaction"
lift (skipMany spacenonewline)
querytxt <- lift $ T.strip <$> descriptionp
(_comment, _tags) <- lift transactioncommentp -- TODO apply these to modified txns ?
postings <- postingsp Nothing
return $ TransactionModifier querytxt postings
-- | Parse a periodic transaction
--
-- This reuses periodexprp which parses period expressions on the command line.
-- This is awkward because periodexprp supports relative and partial dates,
-- which we don't really need here, and it doesn't support the notion of a
-- default year set by a Y directive, which we do need to consider here.
-- We resolve it as follows: in periodic transactions' period expressions,
-- if there is a default year Y in effect, partial/relative dates are calculated
-- relative to Y/1/1. If not, they are calculated relative to today as usual.
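--
-- A rough sketch; a description, if any, must be separated from the
-- period expression by two or more spaces:
--
-- >>> Right _ <- rjp periodictransactionp "~ monthly from 2018/6\n"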
periodictransactionp :: MonadIO m => JournalParser m PeriodicTransaction
periodictransactionp = do
-- first line
char '~' <?> "periodic transaction"
lift $ skipMany spacenonewline
-- a period expression
off <- getOffset
-- if there's a default year in effect, use Y/1/1 as base for partial/relative dates
today <- liftIO getCurrentDay
mdefaultyear <- getYear
let refdate = case mdefaultyear of
Nothing -> today
Just y -> fromGregorian y 1 1
periodExcerpt <- lift $ excerpt_ $
singlespacedtextsatisfyingp (\c -> c /= ';' && c /= '\n')
let periodtxt = T.strip $ getExcerptText periodExcerpt
-- first parsing with 'singlespacedtextp', then "re-parsing" with
-- 'periodexprp' saves 'periodexprp' from having to respect the single-
-- and double-space parsing rules
(interval, span) <- lift $ reparseExcerpt periodExcerpt $ do
pexp <- periodexprp refdate
(<|>) eof $ do
offset1 <- getOffset
void takeRest
offset2 <- getOffset
customFailure $ parseErrorAtRegion offset1 offset2 $
"remainder of period expression cannot be parsed"
<> "\nperhaps you need to terminate the period expression with a double space?"
pure pexp
-- In periodic transactions, the period expression has an additional constraint:
case checkPeriodicTransactionStartDate interval span periodtxt of
Just e -> customFailure $ parseErrorAt off e
Nothing -> pure ()
-- The line can end here, or it can continue with one or more spaces
-- and then zero or more of the following fields. A bit awkward.
(status, code, description, (comment, tags)) <- lift $
(<|>) (eolof >> return (Unmarked, "", "", ("", []))) $ do
skipSome spacenonewline
s <- statusp
c <- codep
desc <- T.strip <$> descriptionp
(cmt, ts) <- transactioncommentp
return (s,c,desc,(cmt,ts))
-- next lines; use same year determined above
postings <- postingsp (Just $ first3 $ toGregorian refdate)
return $ nullperiodictransaction{
ptperiodexpr=periodtxt
,ptinterval=interval
,ptspan=span
,ptstatus=status
,ptcode=code
,ptdescription=description
,ptcomment=comment
,pttags=tags
,ptpostings=postings
}
-- | Parse a (possibly unbalanced) transaction.
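--
-- A date alone is enough (a sketch, following the tests at the end of
-- this module):
--
-- >>> Right _ <- rjp transactionp "2015/1/1\n"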
transactionp :: JournalParser m Transaction
transactionp = do
-- dbgparse 0 "transactionp"
startpos <- getSourcePos
date <- datep <?> "transaction"
edate <- optional (lift $ secondarydatep date) <?> "secondary date"
lookAhead (lift spacenonewline <|> newline) <?> "whitespace or newline"
status <- lift statusp <?> "cleared status"
code <- lift codep <?> "transaction code"
description <- lift $ T.strip <$> descriptionp
(comment, tags) <- lift transactioncommentp
let year = first3 $ toGregorian date
postings <- postingsp (Just year)
endpos <- getSourcePos
let sourcepos = journalSourcePos startpos endpos
return $ txnTieKnot $ Transaction 0 "" sourcepos date edate status code description comment tags postings
--- ** postings
-- Parse the following whitespace-beginning lines as postings, posting
-- tags, and/or comments (inferring year, if needed, from the given date).
postingsp :: Maybe Year -> JournalParser m [Posting]
postingsp mTransactionYear = many (postingp mTransactionYear) <?> "postings"
-- linebeginningwithspaces :: JournalParser m String
-- linebeginningwithspaces = do
-- sp <- lift (skipSome spacenonewline)
-- c <- nonspace
-- cs <- lift restofline
-- return $ sp ++ (c:cs) ++ "\n"
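
-- | Parse a posting: leading whitespace, an optional status mark, an
-- account name, and optionally (after two or more spaces) an amount,
-- balance assertion and/or comment. A rough sketch:
--
-- >>> Right _ <- rjp (postingp Nothing) "  assets:cash  $-10.00\n"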
postingp :: Maybe Year -> JournalParser m Posting
postingp mTransactionYear = do
-- lift $ dbgparse 0 "postingp"
(status, account) <- try $ do
lift (skipSome spacenonewline)
status <- lift statusp
lift (skipMany spacenonewline)
account <- modifiedaccountnamep
return (status, account)
let (ptype, account') = (accountNamePostingType account, textUnbracket account)
lift (skipMany spacenonewline)
amount <- option missingmixedamt $ Mixed . (:[]) <$> amountp
lift (skipMany spacenonewline)
massertion <- optional $ balanceassertionp
_ <- fixedlotpricep
lift (skipMany spacenonewline)
(comment,tags,mdate,mdate2) <- lift $ postingcommentp mTransactionYear
return posting
{ pdate=mdate
, pdate2=mdate2
, pstatus=status
, paccount=account'
, pamount=amount
, pcomment=comment
, ptype=ptype
, ptags=tags
, pbalanceassertion=massertion
}
--- * tests
tests_JournalReader = tests "JournalReader" [
let p = lift accountnamep :: JournalParser IO AccountName in
tests "accountnamep" [
test "basic" $ expectParse p "a:b:c"
,_test "empty inner component" $ expectParseError p "a::c" "" -- TODO
,_test "empty leading component" $ expectParseError p ":b:c" "x"
,_test "empty trailing component" $ expectParseError p "a:b:" "x"
]
-- "Parse a date in YYYY/MM/DD format.
-- Hyphen (-) and period (.) are also allowed as separators.
-- The year may be omitted if a default year has been set.
-- Leading zeroes may be omitted."
,test "datep" $ do
test "YYYY/MM/DD" $ expectParseEq datep "2018/01/01" (fromGregorian 2018 1 1)
test "YYYY-MM-DD" $ expectParse datep "2018-01-01"
test "YYYY.MM.DD" $ expectParse datep "2018.01.01"
test "yearless date with no default year" $ expectParseError datep "1/1" "current year is unknown"
test "yearless date with default year" $ do
let s = "1/1"
ep <- parseWithState mempty{jparsedefaultyear=Just 2018} datep s
either (fail.("parse error at "++).customErrorBundlePretty) (const ok) ep
test "no leading zero" $ expectParse datep "2018/1/1"
,test "datetimep" $ do
let
good = expectParse datetimep
bad = (\t -> expectParseError datetimep t "")
good "2011/1/1 00:00"
good "2011/1/1 23:59:59"
bad "2011/1/1"
bad "2011/1/1 24:00:00"
bad "2011/1/1 00:60:00"
bad "2011/1/1 00:00:60"
bad "2011/1/1 3:5:7"
test "timezone is parsed but ignored" $ do
let t = LocalTime (fromGregorian 2018 1 1) (TimeOfDay 0 0 (fromIntegral 0))
expectParseEq datetimep "2018/1/1 00:00-0800" t
expectParseEq datetimep "2018/1/1 00:00+1234" t
,tests "periodictransactionp" [
test "more period text in comment after one space" $ expectParseEq periodictransactionp
"~ monthly from 2018/6 ;In 2019 we will change this\n"
nullperiodictransaction {
ptperiodexpr = "monthly from 2018/6"
,ptinterval = Months 1
,ptspan = DateSpan (Just $ fromGregorian 2018 6 1) Nothing
,ptdescription = ""
,ptcomment = "In 2019 we will change this\n"
}
,test "more period text in description after two spaces" $ expectParseEq periodictransactionp
"~ monthly from 2018/6 In 2019 we will change this\n"
nullperiodictransaction {
ptperiodexpr = "monthly from 2018/6"
,ptinterval = Months 1
,ptspan = DateSpan (Just $ fromGregorian 2018 6 1) Nothing
,ptdescription = "In 2019 we will change this"
,ptcomment = ""
}
,test "Next year in description" $ expectParseEq periodictransactionp
"~ monthly Next year blah blah\n"
nullperiodictransaction {
ptperiodexpr = "monthly"
,ptinterval = Months 1
,ptspan = DateSpan Nothing Nothing
,ptdescription = "Next year blah blah"
,ptcomment = ""
}
]
,tests "postingp" [
test "basic" $ expectParseEq (postingp Nothing)
" expenses:food:dining $10.00 ; a: a a \n ; b: b b \n"
posting{
paccount="expenses:food:dining",
pamount=Mixed [usd 10],
pcomment="a: a a\nb: b b\n",
ptags=[("a","a a"), ("b","b b")]
}
,test "posting dates" $ expectParseEq (postingp Nothing)
" a 1. ; date:2012/11/28, date2=2012/11/29,b:b\n"
nullposting{
paccount="a"
,pamount=Mixed [num 1]
,pcomment="date:2012/11/28, date2=2012/11/29,b:b\n"
,ptags=[("date", "2012/11/28"), ("date2=2012/11/29,b", "b")] -- TODO tag name parsed too greedily
,pdate=Just $ fromGregorian 2012 11 28
,pdate2=Nothing -- Just $ fromGregorian 2012 11 29
}
,test "posting dates bracket syntax" $ expectParseEq (postingp Nothing)
" a 1. ; [2012/11/28=2012/11/29]\n"
nullposting{
paccount="a"
,pamount=Mixed [num 1]
,pcomment="[2012/11/28=2012/11/29]\n"
,ptags=[]
,pdate= Just $ fromGregorian 2012 11 28
,pdate2=Just $ fromGregorian 2012 11 29
}
,test "quoted commodity symbol with digits" $ expectParse (postingp Nothing) " a 1 \"DE123\"\n"
,test "balance assertion and fixed lot price" $ expectParse (postingp Nothing) " a 1 \"DE123\" =$1 { =2.2 EUR} \n"
,test "balance assertion over entire contents of account" $ expectParse (postingp Nothing) " a $1 == $1\n"
]
,tests "transactionmodifierp" [
test "basic" $ expectParseEq transactionmodifierp
"= (some value expr)\n some:postings 1.\n"
nulltransactionmodifier {
tmquerytxt = "(some value expr)"
,tmpostingrules = [nullposting{paccount="some:postings", pamount=Mixed[num 1]}]
}
]
,tests "transactionp" [
test "just a date" $ expectParseEq transactionp "2015/1/1\n" nulltransaction{tdate=fromGregorian 2015 1 1}
,test "more complex" $ expectParseEq transactionp
(T.unlines [
"2012/05/14=2012/05/15 (code) desc ; tcomment1",
" ; tcomment2",
" ; ttag1: val1",
" * a $1.00 ; pcomment1",
" ; pcomment2",
" ; ptag1: val1",
" ; ptag2: val2"
])
nulltransaction{
tsourcepos=JournalSourcePos "" (1,7), -- XXX why 7 here ?
tprecedingcomment="",
tdate=fromGregorian 2012 5 14,
tdate2=Just $ fromGregorian 2012 5 15,
tstatus=Unmarked,
tcode="code",
tdescription="desc",
tcomment="tcomment1\ntcomment2\nttag1: val1\n",
ttags=[("ttag1","val1")],
tpostings=[
nullposting{
pdate=Nothing,
pstatus=Cleared,
paccount="a",
pamount=Mixed [usd 1],
pcomment="pcomment1\npcomment2\nptag1: val1\nptag2: val2\n",
ptype=RegularPosting,
ptags=[("ptag1","val1"),("ptag2","val2")],
ptransaction=Nothing
}
]
}
,test "parses a well-formed transaction" $
expect $ isRight $ rjp transactionp $ T.unlines
["2007/01/28 coopportunity"
," expenses:food:groceries $47.18"
," assets:checking $-47.18"
,""
]
,test "does not parse a following comment as part of the description" $
expectParseEqOn transactionp "2009/1/1 a ;comment\n b 1\n" tdescription "a"
,test "transactionp parses a following whitespace line" $
expect $ isRight $ rjp transactionp $ T.unlines
["2012/1/1"
," a 1"
," b"
," "
]
,test "comments everywhere, two postings parsed" $
expectParseEqOn transactionp
(T.unlines
["2009/1/1 x ; transaction comment"
," a 1 ; posting 1 comment"
," ; posting 1 comment 2"
," b"
," ; posting 2 comment"
])
(length . tpostings)
2
]
-- directives
,tests "directivep" [
test "supports !" $ do
expectParseE directivep "!account a\n"
expectParseE directivep "!D 1.0\n"
]
,test "accountdirectivep" $ do
test "with-comment" $ expectParse accountdirectivep "account a:b ; a comment\n"
test "does-not-support-!" $ expectParseError accountdirectivep "!account a:b\n" ""
test "account-sort-code" $ expectParse accountdirectivep "account a:b 1000\n"
test "account-type-code" $ expectParse accountdirectivep "account a:b A\n"
test "account-type-tag" $ expectParse accountdirectivep "account a:b ; type:asset\n"
,test "commodityconversiondirectivep" $ do
expectParse commodityconversiondirectivep "C 1h = $50.00\n"
,test "defaultcommoditydirectivep" $ do
expectParse defaultcommoditydirectivep "D $1,000.0\n"
expectParseError defaultcommoditydirectivep "D $1000\n" "please include a decimal separator"
,test "defaultyeardirectivep" $ do
test "1000" $ expectParse defaultyeardirectivep "Y 1000" -- XXX no \n like the others
test "999" $ expectParseError defaultyeardirectivep "Y 999" "bad year number"
test "12345" $ expectParse defaultyeardirectivep "Y 12345"
,test "ignoredpricecommoditydirectivep" $ do
expectParse ignoredpricecommoditydirectivep "N $\n"
,test "includedirectivep" $ do
test "include" $ expectParseErrorE includedirectivep "include nosuchfile\n" "No existing files match pattern: nosuchfile"
test "glob" $ expectParseErrorE includedirectivep "include nosuchfile*\n" "No existing files match pattern: nosuchfile*"
,test "marketpricedirectivep" $ expectParseEq marketpricedirectivep
"P 2017/01/30 BTC $922.83\n"
MarketPrice{
mpdate = fromGregorian 2017 1 30,
mpcommodity = "BTC",
mpamount = usd 922.83
}
,test "tagdirectivep" $ do
expectParse tagdirectivep "tag foo \n"
,test "endtagdirectivep" $ do
expectParse endtagdirectivep "end tag \n"
expectParse endtagdirectivep "pop \n"
,tests "journalp" [
test "empty file" $ expectParseEqE journalp "" nulljournal
]
]