mirror of https://github.com/GaloisInc/macaw.git
synced 2024-12-27 16:15:12 +03:00

update parameterized-utils submodule

parent 3e7bd01560
commit 274808a8ae
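The edits below are mechanical consequences of the submodule bump: in the updated parameterized-utils, natValue on a NatRepr yields a Natural, while the separate intValue accessor yields an Integer, so call sites whose surrounding arithmetic expects Integer switch from natValue to intValue. A minimal standalone sketch of the distinction, assuming the Data.Parameterized.NatRepr interface from the updated submodule (the module name NatReprSketch is hypothetical):

{-# LANGUAGE DataKinds #-}
module NatReprSketch where

import Data.Parameterized.NatRepr (NatRepr, knownNat, natValue, intValue)
import Numeric.Natural (Natural)

-- A type-level width witness for 64 bits.
w64 :: NatRepr 64
w64 = knownNat

-- In the updated parameterized-utils, natValue yields a Natural ...
widthAsNatural :: Natural
widthAsNatural = natValue w64    -- 64

-- ... while intValue yields an Integer, which is what the call sites below
-- need when the result feeds subtraction, fromInteger, bit, and the like.
widthAsInteger :: Integer
widthAsInteger = intValue w64    -- 64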
@@ -254,7 +254,7 @@ instance MemWidth w => Pretty (AbsValue w tp) where
   pretty (StridedInterval s) =
     text "strided" <> parens (pretty s)
   pretty (SubValue n av) =
-    text "sub" <> parens (integer (natValue n) <> comma <+> pretty av)
+    text "sub" <> parens (integer (intValue n) <> comma <+> pretty av)
   pretty (StackOffset a s) = ppSet (ppv <$> Set.toList s)
     where ppv v' | v' >= 0 = text $ "rsp_" ++ show a ++ " + " ++ showHex v' ""
                  | otherwise = text $ "rsp_" ++ show a ++ " - " ++ showHex (negate (toInteger v')) ""
@@ -623,7 +623,7 @@ bvadc w (FinSet l) (FinSet r) (BoolConst b)
       s | Set.size s <= maxSetSize -> FinSet s
       _ -> TopV
   where
-    bottomBits v = v .&. (bit (fromInteger (natValue w)) - 1)
+    bottomBits v = v .&. (bit (fromInteger (intValue w)) - 1)
 -- Strided intervals
 bvadc w v v' c
   | StridedInterval si1 <- v, StridedInterval si2 <- v' = go si1 si2
@@ -114,7 +114,7 @@ instance Show (MemRepr tp) where

 -- | Return the number of bytes this uses in memory.
 memReprBytes :: MemRepr tp -> Integer
-memReprBytes (BVMemRepr x _) = natValue x
+memReprBytes (BVMemRepr x _) = intValue x

 instance TestEquality MemRepr where
   testEquality (BVMemRepr xw xe) (BVMemRepr yw ye) =
@@ -332,8 +332,8 @@ valueAsStaticMultiplication v
   , Just (BVOr _ l r) <- valueAsApp v'
   , Just (BVShl _ l' (BVValue _ shl)) <- valueAsApp l
   , Just (BVShr _ _ (BVValue _ shr)) <- valueAsApp r
-  , c == complement (2^shl-1) `mod` bit (fromInteger (natValue w))
-  , shr >= natValue w - shl
+  , c == complement (2^shl-1) `mod` bit (fromInteger (intValue w))
+  , shr >= intValue w - shl
   = Just (2^shl, l')
   | otherwise = Nothing

@@ -485,19 +485,19 @@ rewriteApp app = do
         NatEQ -> rewriteApp (BVUnsignedLt x y)
         NatGT _ -> rewriteApp (SExt y wx) >>= \y' -> rewriteApp (BVUnsignedLt x y')

-    BVTestBit (BVValue xw xc) (BVValue _ ic) | ic < min (natValue xw) (toInteger (maxBound :: Int)) -> do
+    BVTestBit (BVValue xw xc) (BVValue _ ic) | ic < min (intValue xw) (toInteger (maxBound :: Int)) -> do
       let v = xc `testBit` fromInteger ic
       pure $! boolLitValue v
     -- If we test the greatest bit turn this to a signed equality
     BVTestBit x (BVValue _ ic)
       | w <- typeWidth x
-      , ic + 1 == natValue w -> do
+      , ic + 1 == intValue w -> do
         rewriteApp (BVSignedLt x (BVValue w 0))
       | w <- typeWidth x
-      , ic >= natValue w -> pure (boolLitValue False)
+      , ic >= intValue w -> pure (boolLitValue False)
     BVTestBit (valueAsApp -> Just (UExt x _)) (BVValue _ ic) -> do
       let xw = typeWidth x
-      if ic < natValue xw then
+      if ic < intValue xw then
         rewriteApp (BVTestBit x (BVValue xw ic))
        else
         pure (BoolValue False)
@@ -528,8 +528,8 @@ rewriteApp app = do
       | j + i <= maxUnsigned w -> do
         rewriteApp (BVTestBit x (BVValue w (j + i)))
     BVTestBit (valueAsApp -> Just (BVSar w x (BVValue _ j))) (BVValue _ i)
-      | i < natValue w -> do
-        rewriteApp (BVTestBit x (BVValue w (min (j + i) (natValue w-1))))
+      | i < intValue w -> do
+        rewriteApp (BVTestBit x (BVValue w (min (j + i) (intValue w-1))))
     BVTestBit (valueAsApp -> Just (BVShl w x (BVValue _ j))) (BVValue _ i)
       | j <= i -> rewriteApp (BVTestBit x (BVValue w (i - j)))
       | otherwise -> pure (boolLitValue False)
@@ -568,22 +568,22 @@ rewriteApp app = do


     BVShl w (BVValue _ x) (BVValue _ y) | y < toInteger (maxBound :: Int) -> do
-      let s = min y (natValue w)
+      let s = min y (intValue w)
       pure (BVValue w (toUnsigned w (x `shiftL` fromInteger s)))
     BVShr w (BVValue _ x) (BVValue _ y) | y < toInteger (maxBound :: Int) -> do
-      let s = min y (natValue w)
+      let s = min y (intValue w)
       pure (BVValue w (toUnsigned w (x `shiftR` fromInteger s)))
     BVSar w (BVValue _ x) (BVValue _ y) | y < toInteger (maxBound :: Int) -> do
-      let s = min y (natValue w)
+      let s = min y (intValue w)
       pure (BVValue w (toUnsigned w (toSigned w x `shiftR` fromInteger s)))

     BVShl _ v (BVValue _ 0) -> pure v
     BVShr _ v (BVValue _ 0) -> pure v
     BVSar _ v (BVValue _ 0) -> pure v

-    BVShl w _ (BVValue _ n) | n >= natValue w ->
+    BVShl w _ (BVValue _ n) | n >= intValue w ->
       pure (BVValue w 0)
-    BVShr w _ (BVValue _ n) | n >= natValue w ->
+    BVShr w _ (BVValue _ n) | n >= intValue w ->
       pure (BVValue w 0)

     PopCount w (BVValue _ x) -> do
@@ -591,8 +591,8 @@ rewriteApp app = do
     Bsr w (BVValue _ x) -> do
       let i = fromJust $ find
                 (\j -> toUnsigned w x `shiftR` fromIntegral j == 0)
-                [0 .. natValue w]
-      pure $ BVValue w $ natValue w - i
+                [0 .. intValue w]
+      pure $ BVValue w $ intValue w - i

     Eq (BoolValue x) (BoolValue y) -> do
       pure $! boolLitValue (x == y)
deps/parameterized-utils (vendored submodule)
@@ -1 +1 @@
-Subproject commit 3e5fdd357d5fdd466dc0370ae730590d832529aa
+Subproject commit 0aa7f1dd1d8dc47084f6a3728e1c60766ef67494
@@ -1050,13 +1050,13 @@ countZeros :: (1 <= w) =>
   CrucGen arch ids h s (CR.Atom s (MM.LLVMPointerType w))
 countZeros w f vx = do
   cx <- v2c vx >>= toBits w
-  isZeros <- forM [0..natValue w-1] $ \i -> do
+  isZeros <- forM [0..intValue w-1] $ \i -> do
     shiftAmt <- bvLit w i
     shiftedx <- appAtom (f w cx shiftAmt)
     xIsNonzero <- appAtom (C.BVNonzero w shiftedx)
     appAtom (C.BoolToBV w xIsNonzero)
   czero <- bvLit w 0
-  cw <- bvLit w (natValue w)
+  cw <- bvLit w (intValue w)
   cn <- foldM ((appAtom .) . C.BVAdd w) czero isZeros
   appAtom (C.BVSub w cw cn) >>= fromBits w
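As a side note on the hunk above: countZeros tallies, for each shift amount i below the width, whether the shifted value is still nonzero, and subtracts that tally from the width. A concrete, non-symbolic analogue of the same counting scheme (a sketch only; the real code emits Crucible atoms, and countZerosBy is a hypothetical helper):

import Data.Bits (shiftL, shiftR)
import Data.Word (Word64)

-- Count how many shift amounts leave the value nonzero, then subtract that
-- from the width.  With shiftL this yields the number of trailing zeros,
-- with shiftR the number of leading zeros (and 64 for an input of 0).
countZerosBy :: (Word64 -> Int -> Word64) -> Word64 -> Int
countZerosBy shiftOp x = w - nonzeroShifts
  where
    w = 64
    nonzeroShifts = length [ i | i <- [0 .. w - 1], shiftOp x i /= 0 ]

-- e.g. countZerosBy shiftL 8 == 3, countZerosBy shiftR 8 == 60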
@@ -320,7 +320,7 @@ doPtrAnd = ptrOp $ \sym _mem w xPtr xBits yPtr yBits x y ->
   let nw = M.addrWidthNatRepr w
       doPtrAlign amt isP isB v
         | amt == 0 = return v
-        | amt == natValue nw = mkNullPointer sym nw
+        | amt == intValue nw = mkNullPointer sym nw
         | Just 0 <- asNat (ptrBase v) = llvmPointer_bv sym =<<
             bvAndBits sym (asBits x) (asBits y)
@@ -730,7 +730,7 @@ isAlignMask :: (IsSymInterface sym) => LLVMPtr sym w -> Maybe Integer
 isAlignMask v =
   do 0 <- asNat (ptrBase v)
      let off = asBits v
-         w = fromInteger (natValue (bvWidth off))
+         w = fromInteger (intValue (bvWidth off))
      k <- asUnsignedBV off
      let (zeros,ones) = break (testBit k) (take w [ 0 .. ])
      guard (all (testBit k) ones)
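For the hunk above, an alignment mask is a value whose low bits are all clear and whose remaining high bits are all set (e.g. 0xfffffff0): break splits the bit positions at the first set bit, and the guard demands every later position be set. A concrete sketch of the same test on a plain non-negative Integer (isAlignMaskInt is a hypothetical stand-in; the real isAlignMask works on a symbolic LLVMPtr offset, and what it returns is not shown in this hunk):

import Control.Monad (guard)
import Data.Bits (testBit)

-- Scan bit positions 0..w-1: a run of clear bits must be followed only by
-- set bits.  This sketch returns the length of the low run of zeros.
isAlignMaskInt :: Int -> Integer -> Maybe Int
isAlignMaskInt w k = do
  let (zeros, ones) = break (testBit k) (take w [0 ..])
  guard (all (testBit k) ones)
  return (length zeros)

-- e.g. isAlignMaskInt 32 0xfffffff0 == Just 4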
@@ -322,7 +322,7 @@ defaultRegisterViewRead
   -> Expr ids (BVType n)
 defaultRegisterViewRead b n _rn v0
   | LeqProof <- leqTrans (LeqProof :: LeqProof 1 n) (LeqProof :: LeqProof n m) =
-    bvTrunc n $ v0 `bvShr` bvLit (typeWidth v0) (natValue b)
+    bvTrunc n $ v0 `bvShr` bvLit (typeWidth v0) (intValue b)

 -- | Read a register via a view.
 --
@@ -399,13 +399,13 @@ defaultRegisterViewWrite b n rn v0 write_val
     -- (0^|h ++ m| ++ l)
     -- highOrderBits .|. lowOrderBits
     let -- Generate the mask for the new bits
-        myMask = maxUnsigned n `Bits.shiftL` fromInteger (natValue b)
+        myMask = maxUnsigned n `Bits.shiftL` fromInteger (intValue b)
         -- Generate max for old bits.
         notMyMask = Bits.complement myMask
         prevBits = v0 .&. bvLit w notMyMask
         w = typeWidth v0
         cl = typeWidth rn
-        b' = natValue b
+        b' = intValue b
         middleOrderBits = uext cl write_val `bvShl` bvLit cl b'
     in prevBits .|. middleOrderBits
@@ -634,12 +634,12 @@ ppLocation ppAddr loc = case loc of
 ppReg :: RegisterView w n cl -> Doc
 ppReg rv =
   text $ "%" ++ show (_registerViewReg rv) ++
-    if b == 0 && s == (fromIntegral $ natValue (typeWidth $ _registerViewReg rv))
+    if b == 0 && s == (fromIntegral $ intValue (typeWidth $ _registerViewReg rv))
       then ""
       else "[" ++ show b ++ ":" ++ show s ++ "]"
   where
-    b = natValue $ _registerViewBase rv
-    s = natValue $ _registerViewSize rv
+    b = intValue $ _registerViewBase rv
+    s = intValue $ _registerViewSize rv

 ------------------------------------------------------------------------
 -- Register-location smart constructors.
@@ -862,7 +862,7 @@ packWord (R.BitPacking sz bits) = do
       return $ bvLit sz (if b then 1 `Bits.shiftL` widthVal off else (0 :: Integer))
     getMoveBits (R.RegisterBit reg off) = do
       v <- uext sz <$> get (fullRegister reg)
-      return $ v `bvShl` bvLit sz (natValue off)
+      return $ v `bvShl` bvLit sz (intValue off)
   injs <- mapM getMoveBits bits
   return (foldl1 (.|.) injs)
@@ -877,7 +877,7 @@ unpackWord (R.BitPacking sz bits) v = mapM_ unpackOne bits
     unpackOne R.ConstantBit{} = return ()
     unpackOne (R.RegisterBit reg off) = do
       let res_w = typeWidth reg
-      fullRegister reg .= bvTrunc res_w (v `bvShr` bvLit sz (natValue off))
+      fullRegister reg .= bvTrunc res_w (v `bvShr` bvLit sz (intValue off))

 ------------------------------------------------------------------------
 -- Values
@@ -1083,7 +1083,7 @@ bvCat h l =
     case ( m_le_m_plus_n , n_le_m_plus_n , _1_le_m_plus_n ) of
       (LeqProof, LeqProof, LeqProof) ->
         let highOrderBits =
-              uext m_plus_n h `bvShl` bvLit m_plus_n (natValue $ n)
+              uext m_plus_n h `bvShl` bvLit m_plus_n (intValue $ n)
             lowOrderBits = uext m_plus_n l
         in highOrderBits .|. lowOrderBits
@@ -1117,7 +1117,7 @@ bvSplit v =
     LeqProof ->
       case leqAdd (leqRefl sz) sz :: LeqProof n (n + n) of
         LeqProof ->
-          let sh = bvLit (typeWidth v) (natValue sz)
+          let sh = bvLit (typeWidth v) (intValue sz)
           in (bvTrunc sz (v `bvShr` sh), bvTrunc sz v)

 -- | Vectorization
@@ -1157,11 +1157,11 @@ vectorize2 sz f x y = let xs = bvVectorize sz x
 -- | Rotations
 bvRol :: (1 <= n) => Expr ids (BVType n) -> Expr ids (BVType n) -> Expr ids (BVType n)
 bvRol v n = bvShl v n .|. bvShr v bits_less_n
-  where bits_less_n = bvLit (typeWidth v) (natValue $ typeWidth v) .- n
+  where bits_less_n = bvLit (typeWidth v) (intValue $ typeWidth v) .- n

 bvRor :: (1 <= n) => Expr ids (BVType n) -> Expr ids (BVType n) -> Expr ids (BVType n)
 bvRor v n = bvShr v n .|. bvShl v bits_less_n
-  where bits_less_n = bvLit (typeWidth v) (natValue $ typeWidth v) .- n
+  where bits_less_n = bvLit (typeWidth v) (intValue $ typeWidth v) .- n

 -- | Shifts, the semantics is undefined for shifts >= the width of the first argument.
 --
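The rotation definitions above build rotate-left/right out of two shifts, using the complementary shift amount width - n. A concrete Data.Bits version of the same shape (illustrative only; macaw's bvRol/bvRor operate on symbolic expressions, and masking of the count is left to callers such as exec_rol/exec_ror):

import Data.Bits ((.&.), (.|.), shiftL, shiftR)
import Data.Word (Word8)

-- Rotate left: (v << n) .|. (v >> (width - n)), mirroring bvRol above.
rol8 :: Word8 -> Int -> Word8
rol8 v n = (v `shiftL` k) .|. (v `shiftR` (8 - k))
  where k = n .&. 7   -- keep the count in range, as the callers do

-- Rotate right: (v >> n) .|. (v << (width - n)), mirroring bvRor above.
ror8 :: Word8 -> Int -> Word8
ror8 v n = (v `shiftR` k) .|. (v `shiftL` (8 - k))
  where k = n .&. 7

-- e.g. rol8 0x81 1 == 0x03, ror8 0x81 1 == 0xC0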
@@ -1195,7 +1195,7 @@ bvShl x y
       x_base .&. bvLit w (negate (2^x_shft) ::Integer)

   | Just yv <- asUnsignedBVLit y
-  , yv >= natValue (typeWidth x) = bvLit (typeWidth x) (0 :: Integer)
+  , yv >= intValue (typeWidth x) = bvLit (typeWidth x) (0 :: Integer)

   | otherwise = app $ BVShl (typeWidth x) x y
@@ -1285,7 +1285,7 @@ bvBit x y
       boolValue (xv `Bits.testBit` fromInteger yv)
   | Just (Trunc xe w) <- asApp x
   , Just LeqProof <- testLeq n1 (typeWidth xe)
-  , Just yv <- asUnsignedBVLit y = assert (0 <= yv && yv < natValue w) $
+  , Just yv <- asUnsignedBVLit y = assert (0 <= yv && yv < intValue w) $
       bvBit xe (ValueExpr (BVValue (typeWidth xe) yv))

   | otherwise =
@@ -831,7 +831,7 @@ exec_sh lw l val val_setter cf_setter of_setter = do
   -- bound on the time the shift takes, with a mask of 63 in the case
   -- of a 64 bit operand, and 31 in the other cases.
   let w = typeWidth (repValSizeMemRepr lw)
-  let nbits = if natValue w == 64 then 64 else 32
+  let nbits = if intValue w == 64 then 64 else 32
   let low_count = count .&. bvLit n8 (nbits - 1)
   -- Compute result.
   let res = val_setter v (uext (typeWidth v) low_count)
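The comment in the hunk above describes the x86 shift-count masking that the changed line feeds: the count is masked with nbits - 1, i.e. 63 (0x3F) for 64-bit operands and 31 (0x1F) otherwise. A tiny concrete restatement (shiftCountMask is a hypothetical helper, not part of macaw):

-- The operand width selects the x86 shift-count mask applied to the count,
-- matching `low_count = count .&. bvLit n8 (nbits - 1)` above.
shiftCountMask :: Integer -> Integer
shiftCountMask opWidth
  | opWidth == 64 = 63   -- 0x3F: 64-bit operands use the low 6 bits of the count
  | otherwise     = 31   -- 0x1F: 8/16/32-bit operands use the low 5 bits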
@@ -878,7 +878,7 @@ def_sh mnem val_setter cf_setter of_setter = defBinary mnem $ \_ii loc val -> do
 def_shl :: InstructionDef
 def_shl = def_sh "shl" bvShl set_cf set_of
   where set_cf w v i =
-          (i `bvUle` bvLit n8 (natValue w)) .&&. bvBit v (bvLit w (natValue w) .- uext w i)
+          (i `bvUle` bvLit n8 (intValue w)) .&&. bvBit v (bvLit w (intValue w) .- uext w i)
         set_of v _ = msb v

 def_shr :: InstructionDef
@@ -894,9 +894,9 @@ def_sar = def_sh "sar" bvSar set_cf set_of
         -- the index - 1 or the most-significant bit depending on the shift value.
         set_cf w v i = do
           -- Check if w < i
-          let notInRange = bvUlt (bvLit n8 (natValue w)) i
+          let notInRange = bvUlt (bvLit n8 (intValue w)) i
           -- Get most-significant bit
-          let msb_v = bvBit v (bvLit w (natValue w-1))
+          let msb_v = bvBit v (bvLit w (intValue w-1))
           bvBit v (uext w i .- bvLit w 1) .||. (notInRange .&&. msb_v)
         set_of _ _ = false
@@ -928,7 +928,7 @@ def_shXd mnemonic val_setter cf_setter of_setter =
 def_shld :: InstructionDef
 def_shld = def_shXd "shld" exec_shld set_cf set_of
   where set_cf w v i =
-          (i `bvUle` bvLit n8 (natValue w)) .&&. bvBit v (bvLit w (natValue w) .- uext w i)
+          (i `bvUle` bvLit n8 (intValue w)) .&&. bvBit v (bvLit w (intValue w) .- uext w i)
         set_of v _ = msb v

 exec_shld :: forall n ids . (1 <= n) =>
@@ -975,7 +975,7 @@ exec_rol l count = do
       low_count = uext (typeWidth v) count .&. countMASK
       -- countMASK is sufficient for 32 and 64 bit operand sizes, but not 16 or
       -- 8, so we need to mask those off again...
-      effectiveMASK = bvLit (typeWidth v) (natValue (typeWidth v) - 1)
+      effectiveMASK = bvLit (typeWidth v) (intValue (typeWidth v) - 1)
       effective = uext (typeWidth v) count .&. effectiveMASK
       r = bvRol v effective
@@ -1010,19 +1010,19 @@ exec_ror l count = do
       low_count = uext (typeWidth v) count .&. countMASK
       -- countMASK is sufficient for 32 and 64 bit operand sizes, but not 16 or
      -- 8, so we need to mask those off again...
-      effectiveMASK = bvLit (typeWidth v) (natValue (typeWidth v) - 1)
+      effectiveMASK = bvLit (typeWidth v) (intValue (typeWidth v) - 1)
       effective = uext (typeWidth v) count .&. effectiveMASK
       r = bvRor v effective

   l .= r
   let p = is_zero low_count
-  let new_cf = bvBit r (bvLit (typeWidth r) (natValue (typeWidth r) - 1))
+  let new_cf = bvBit r (bvLit (typeWidth r) (intValue (typeWidth r) - 1))
   modify cf_loc $ \old_cf -> mux p old_cf new_cf
   u <- make_undefined knownRepr
   modify of_loc $ \old_of ->
     mux p old_of $
       mux (low_count .=. bvLit (typeWidth low_count) 1)
-          (msb r `boolXor` bvBit r (bvLit (typeWidth r) (natValue (typeWidth v) - 2)))
+          (msb r `boolXor` bvBit r (bvLit (typeWidth r) (intValue (typeWidth v) - 2)))
           u

 -- ** Bit and Byte Instructions
@@ -1468,12 +1468,12 @@ def_lods = defBinary "lods" $ \ii loc loc' -> do
 def_lodsx :: (1 <= elsz) => String -> NatRepr elsz -> InstructionDef
 def_lodsx suf elsz = defNullaryPrefix ("lods" ++ suf) $ \pfx -> do
   let rep = pfx == F.RepPrefix
-  case natValue elsz of
+  case intValue elsz of
     8  -> exec_lods rep ByteRepVal
     16 -> exec_lods rep WordRepVal
     32 -> exec_lods rep DWordRepVal
     64 -> exec_lods rep QWordRepVal
-    _  -> error $ "lodsx given bad size " ++ show (natValue elsz)
+    _  -> error $ "lodsx given bad size " ++ show (intValue elsz)

 -- | STOS/STOSB Store string/Store byte string
 -- STOS/STOSW Store string/Store word string
@@ -1899,7 +1899,7 @@ def_psllx suf elsz = defBinaryLVpoly ("psll" ++ suf) $ \l count -> do
       -- truncate e.g. 2^31 to 0, so we saturate if the size is over
       -- the number of bits we want to shift. We can always fit the
       -- width into count bits (assuming we are passed 16, 32, or 64).
-      nbits = bvLit (typeWidth count) (natValue elsz)
+      nbits = bvLit (typeWidth count) (intValue elsz)
       countsz = case testNatCases (typeWidth count) elsz of
         NatCaseLT LeqProof -> uext' elsz count
         NatCaseEQ -> count
@@ -150,7 +150,7 @@ avxInsert mnem =
   avx4 mnem $ \arg1 arg2 arg3 arg4 ->
     do SomeBV vec <- getSomeBVValue arg2
        SomeBV el <- getSomeBVValue arg3
-       Some i <- case someNat (fromIntegral arg4) of
+       Some i <- case someNat (fromIntegral arg4 :: Integer) of
                    Just ok -> return ok
                    Nothing -> err "Invalid index"
        let vw = typeWidth vec